Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/adb.h                            2
-rw-r--r--  include/linux/auto_fs4.h                      51
-rw-r--r--  include/linux/bootmem.h                        1
-rw-r--r--  include/linux/compat.h                        18
-rw-r--r--  include/linux/compat_ioctl.h                   2
-rw-r--r--  include/linux/device-mapper.h                  1
-rw-r--r--  include/linux/dm-ioctl.h                      17
-rw-r--r--  include/linux/fs.h                            10
-rw-r--r--  include/linux/futex.h                         89
-rw-r--r--  include/linux/genhd.h                          3
-rw-r--r--  include/linux/i2c-id.h                         4
-rw-r--r--  include/linux/kernel.h                         2
-rw-r--r--  include/linux/m48t86.h                        16
-rw-r--r--  include/linux/memory.h                         1
-rw-r--r--  include/linux/mmzone.h                        91
-rw-r--r--  include/linux/netfilter_ipv4/ip_conntrack.h   17
-rw-r--r--  include/linux/nfsd/export.h                   29
-rw-r--r--  include/linux/nodemask.h                       4
-rw-r--r--  include/linux/notifier.h                      96
-rw-r--r--  include/linux/pci_ids.h                        1
-rw-r--r--  include/linux/pfn.h                            9
-rw-r--r--  include/linux/raid/md.h                        3
-rw-r--r--  include/linux/raid/md_k.h                     21
-rw-r--r--  include/linux/raid/md_p.h                     32
-rw-r--r--  include/linux/raid/raid5.h                    25
-rw-r--r--  include/linux/rtc.h                           92
-rw-r--r--  include/linux/sched.h                          6
-rw-r--r--  include/linux/sunrpc/cache.h                 145
-rw-r--r--  include/linux/sunrpc/svcauth.h                12
-rw-r--r--  include/linux/threads.h                        3
-rw-r--r--  include/linux/topology.h                       9
-rw-r--r--  include/linux/x1205.h                         31
32 files changed, 555 insertions, 288 deletions
diff --git a/include/linux/adb.h b/include/linux/adb.h
index e9fdc63483c7..b7305b178279 100644
--- a/include/linux/adb.h
+++ b/include/linux/adb.h
@@ -85,7 +85,7 @@ enum adb_message {
ADB_MSG_POST_RESET /* Called after resetting the bus (re-do init & register) */
};
extern struct adb_driver *adb_controller;
-extern struct notifier_block *adb_client_list;
+extern struct blocking_notifier_head adb_client_list;
int adb_request(struct adb_request *req, void (*done)(struct adb_request *),
int flags, int nbytes, ...);
diff --git a/include/linux/auto_fs4.h b/include/linux/auto_fs4.h
index 9343c89d843c..0a6bc52ffe88 100644
--- a/include/linux/auto_fs4.h
+++ b/include/linux/auto_fs4.h
@@ -19,18 +19,37 @@
#undef AUTOFS_MIN_PROTO_VERSION
#undef AUTOFS_MAX_PROTO_VERSION
-#define AUTOFS_PROTO_VERSION 4
+#define AUTOFS_PROTO_VERSION 5
#define AUTOFS_MIN_PROTO_VERSION 3
-#define AUTOFS_MAX_PROTO_VERSION 4
+#define AUTOFS_MAX_PROTO_VERSION 5
-#define AUTOFS_PROTO_SUBVERSION 7
+#define AUTOFS_PROTO_SUBVERSION 0
/* Mask for expire behaviour */
#define AUTOFS_EXP_IMMEDIATE 1
#define AUTOFS_EXP_LEAVES 2
-/* New message type */
-#define autofs_ptype_expire_multi 2 /* Expire entry (umount request) */
+/* Daemon notification packet types */
+enum autofs_notify {
+ NFY_NONE,
+ NFY_MOUNT,
+ NFY_EXPIRE
+};
+
+/* Kernel protocol version 4 packet types */
+
+/* Expire entry (umount request) */
+#define autofs_ptype_expire_multi 2
+
+/* Kernel protocol version 5 packet types */
+
+/* Indirect mount missing and expire requests. */
+#define autofs_ptype_missing_indirect 3
+#define autofs_ptype_expire_indirect 4
+
+/* Direct mount missing and expire requests */
+#define autofs_ptype_missing_direct 5
+#define autofs_ptype_expire_direct 6
/* v4 multi expire (via pipe) */
struct autofs_packet_expire_multi {
@@ -40,14 +59,36 @@ struct autofs_packet_expire_multi {
char name[NAME_MAX+1];
};
+/* autofs v5 common packet struct */
+struct autofs_v5_packet {
+ struct autofs_packet_hdr hdr;
+ autofs_wqt_t wait_queue_token;
+ __u32 dev;
+ __u64 ino;
+ __u32 uid;
+ __u32 gid;
+ __u32 pid;
+ __u32 tgid;
+ __u32 len;
+ char name[NAME_MAX+1];
+};
+
+typedef struct autofs_v5_packet autofs_packet_missing_indirect_t;
+typedef struct autofs_v5_packet autofs_packet_expire_indirect_t;
+typedef struct autofs_v5_packet autofs_packet_missing_direct_t;
+typedef struct autofs_v5_packet autofs_packet_expire_direct_t;
+
union autofs_packet_union {
struct autofs_packet_hdr hdr;
struct autofs_packet_missing missing;
struct autofs_packet_expire expire;
struct autofs_packet_expire_multi expire_multi;
+ struct autofs_v5_packet v5_packet;
};
#define AUTOFS_IOC_EXPIRE_MULTI _IOW(0x93,0x66,int)
+#define AUTOFS_IOC_EXPIRE_INDIRECT AUTOFS_IOC_EXPIRE_MULTI
+#define AUTOFS_IOC_EXPIRE_DIRECT AUTOFS_IOC_EXPIRE_MULTI
#define AUTOFS_IOC_PROTOSUBVER _IOR(0x93,0x67,int)
#define AUTOFS_IOC_ASKREGHOST _IOR(0x93,0x68,int)
#define AUTOFS_IOC_TOGGLEREGHOST _IOR(0x93,0x69,int)
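
With protocol version 5, every daemon notification uses the common struct autofs_v5_packet above, distinguished only by the packet type in the header. A hedged userspace sketch of how an automount daemon might read and dispatch these packets; the pipe file descriptor and the printing are illustrative assumptions:

/* Hedged sketch: read one packet from the autofs kernel pipe and dispatch. */
#include <stdio.h>
#include <unistd.h>
#include <linux/auto_fs4.h>

static int handle_packet(int pipefd)
{
	union autofs_packet_union pkt;
	ssize_t n = read(pipefd, &pkt, sizeof(pkt));

	if (n < (ssize_t)sizeof(pkt.hdr))
		return -1;

	switch (pkt.hdr.type) {
	case autofs_ptype_missing_indirect:
	case autofs_ptype_missing_direct:
		/* v5: same struct for both; mount pkt.v5_packet.name */
		printf("mount request: %s (uid %u)\n",
		       pkt.v5_packet.name, pkt.v5_packet.uid);
		break;
	case autofs_ptype_expire_indirect:
	case autofs_ptype_expire_direct:
		printf("expire request: %s\n", pkt.v5_packet.name);
		break;
	default:
		break;
	}
	return 0;
}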
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 7155452fb4a8..de3eb8d8ae26 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -38,6 +38,7 @@ typedef struct bootmem_data {
unsigned long last_pos;
unsigned long last_success; /* Previous allocation point. To speed
* up searching */
+ struct list_head list;
} bootmem_data_t;
extern unsigned long __init bootmem_bootmap_pages (unsigned long);
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 24d659cdbafe..6d3a654be1ae 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -147,6 +147,24 @@ typedef struct compat_sigevent {
} _sigev_un;
} compat_sigevent_t;
+struct compat_robust_list {
+ compat_uptr_t next;
+};
+
+struct compat_robust_list_head {
+ struct compat_robust_list list;
+ compat_long_t futex_offset;
+ compat_uptr_t list_op_pending;
+};
+
+extern void compat_exit_robust_list(struct task_struct *curr);
+
+asmlinkage long
+compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
+ compat_size_t len);
+asmlinkage long
+compat_sys_get_robust_list(int pid, compat_uptr_t *head_ptr,
+ compat_size_t __user *len_ptr);
long compat_sys_semctl(int first, int second, int third, void __user *uptr);
long compat_sys_msgsnd(int first, int second, int third, void __user *uptr);
diff --git a/include/linux/compat_ioctl.h b/include/linux/compat_ioctl.h
index efb518f16bb3..89ab677cb993 100644
--- a/include/linux/compat_ioctl.h
+++ b/include/linux/compat_ioctl.h
@@ -140,6 +140,7 @@ COMPATIBLE_IOCTL(DM_TABLE_DEPS_32)
COMPATIBLE_IOCTL(DM_TABLE_STATUS_32)
COMPATIBLE_IOCTL(DM_LIST_VERSIONS_32)
COMPATIBLE_IOCTL(DM_TARGET_MSG_32)
+COMPATIBLE_IOCTL(DM_DEV_SET_GEOMETRY_32)
COMPATIBLE_IOCTL(DM_VERSION)
COMPATIBLE_IOCTL(DM_REMOVE_ALL)
COMPATIBLE_IOCTL(DM_LIST_DEVICES)
@@ -155,6 +156,7 @@ COMPATIBLE_IOCTL(DM_TABLE_DEPS)
COMPATIBLE_IOCTL(DM_TABLE_STATUS)
COMPATIBLE_IOCTL(DM_LIST_VERSIONS)
COMPATIBLE_IOCTL(DM_TARGET_MSG)
+COMPATIBLE_IOCTL(DM_DEV_SET_GEOMETRY)
/* Big K */
COMPATIBLE_IOCTL(PIO_FONT)
COMPATIBLE_IOCTL(GIO_FONT)
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 51e0e95a421a..aee10b2ea4c6 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -97,6 +97,7 @@ struct io_restrictions {
unsigned short hardsect_size;
unsigned int max_segment_size;
unsigned long seg_boundary_mask;
+ unsigned char no_cluster; /* inverted so that 0 is default */
};
struct dm_target {
diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h
index fa75ba0d635e..c67c6786612a 100644
--- a/include/linux/dm-ioctl.h
+++ b/include/linux/dm-ioctl.h
@@ -80,6 +80,16 @@
*
* DM_TARGET_MSG:
* Pass a message string to the target at a specific offset of a device.
+ *
+ * DM_DEV_SET_GEOMETRY:
+ * Set the geometry of a device by passing in a string in this format:
+ *
+ * "cylinders heads sectors_per_track start_sector"
+ *
+ * Beware that CHS geometry is nearly obsolete and only provided
+ * for compatibility with dm devices that can be booted by a PC
+ * BIOS. See struct hd_geometry for range limits. Also note that
+ * the geometry is erased if the device size changes.
*/
/*
@@ -218,6 +228,7 @@ enum {
/* Added later */
DM_LIST_VERSIONS_CMD,
DM_TARGET_MSG_CMD,
+ DM_DEV_SET_GEOMETRY_CMD
};
/*
@@ -247,6 +258,7 @@ typedef char ioctl_struct[308];
#define DM_TABLE_STATUS_32 _IOWR(DM_IOCTL, DM_TABLE_STATUS_CMD, ioctl_struct)
#define DM_LIST_VERSIONS_32 _IOWR(DM_IOCTL, DM_LIST_VERSIONS_CMD, ioctl_struct)
#define DM_TARGET_MSG_32 _IOWR(DM_IOCTL, DM_TARGET_MSG_CMD, ioctl_struct)
+#define DM_DEV_SET_GEOMETRY_32 _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, ioctl_struct)
#endif
#define DM_IOCTL 0xfd
@@ -270,11 +282,12 @@ typedef char ioctl_struct[308];
#define DM_LIST_VERSIONS _IOWR(DM_IOCTL, DM_LIST_VERSIONS_CMD, struct dm_ioctl)
#define DM_TARGET_MSG _IOWR(DM_IOCTL, DM_TARGET_MSG_CMD, struct dm_ioctl)
+#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 5
+#define DM_VERSION_MINOR 6
#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2005-10-04)"
+#define DM_VERSION_EXTRA "-ioctl (2006-02-17)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
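
DM_DEV_SET_GEOMETRY takes its argument as the whitespace-separated string documented above. A small hedged helper for building that string; how the string is carried to the kernel is left out here (in practice libdevmapper wraps the dm ioctls):

#include <stdio.h>
#include <stdint.h>

/* Format "cylinders heads sectors_per_track start_sector" for DM_DEV_SET_GEOMETRY. */
static int format_dm_geometry(char *buf, size_t len,
			      unsigned cyl, unsigned heads,
			      unsigned sectors, uint64_t start)
{
	return snprintf(buf, len, "%u %u %u %llu",
			cyl, heads, sectors, (unsigned long long)start);
}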
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 9d9674946956..680d913350e7 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -410,6 +410,9 @@ struct block_device {
struct list_head bd_inodes;
void * bd_holder;
int bd_holders;
+#ifdef CONFIG_SYSFS
+ struct list_head bd_holder_list;
+#endif
struct block_device * bd_contains;
unsigned bd_block_size;
struct hd_struct * bd_part;
@@ -1399,6 +1402,13 @@ extern int blkdev_get(struct block_device *, mode_t, unsigned);
extern int blkdev_put(struct block_device *);
extern int bd_claim(struct block_device *, void *);
extern void bd_release(struct block_device *);
+#ifdef CONFIG_SYSFS
+extern int bd_claim_by_disk(struct block_device *, void *, struct gendisk *);
+extern void bd_release_from_disk(struct block_device *, struct gendisk *);
+#else
+#define bd_claim_by_disk(bdev, holder, disk) bd_claim(bdev, holder)
+#define bd_release_from_disk(bdev, disk) bd_release(bdev)
+#endif
/* fs/char_dev.c */
extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
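
bd_claim_by_disk() lets a stacking driver record in sysfs which gendisk is holding a claimed block device; without CONFIG_SYSFS it falls back to plain bd_claim()/bd_release() via the macros above. A hedged sketch with a hypothetical driver structure:

#include <linux/fs.h>
#include <linux/genhd.h>

struct my_stacked_dev {
	struct gendisk *disk;			/* the upper (stacked) disk */
	struct block_device *component;		/* lower device we sit on */
};

static int my_stacked_attach(struct my_stacked_dev *d,
			     struct block_device *bdev)
{
	/* with CONFIG_SYSFS this also creates the holders/slaves links */
	int err = bd_claim_by_disk(bdev, d, d->disk);

	if (err)
		return err;
	d->component = bdev;
	return 0;
}

static void my_stacked_detach(struct my_stacked_dev *d)
{
	bd_release_from_disk(d->component, d->disk);
}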
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 10f96c31971e..966a5b3da439 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -1,6 +1,8 @@
#ifndef _LINUX_FUTEX_H
#define _LINUX_FUTEX_H
+#include <linux/sched.h>
+
/* Second argument to futex syscall */
@@ -11,10 +13,97 @@
#define FUTEX_CMP_REQUEUE 4
#define FUTEX_WAKE_OP 5
+/*
+ * Support for robust futexes: the kernel cleans up held futexes at
+ * thread exit time.
+ */
+
+/*
+ * Per-lock list entry - embedded in user-space locks, somewhere close
+ * to the futex field. (Note: user-space uses a double-linked list to
+ * achieve O(1) list add and remove, but the kernel only needs to know
+ * about the forward link)
+ *
+ * NOTE: this structure is part of the syscall ABI, and must not be
+ * changed.
+ */
+struct robust_list {
+ struct robust_list __user *next;
+};
+
+/*
+ * Per-thread list head:
+ *
+ * NOTE: this structure is part of the syscall ABI, and must only be
+ * changed if the change is first communicated with the glibc folks.
+ * (When an incompatible change is done, we'll increase the structure
+ * size, which glibc will detect)
+ */
+struct robust_list_head {
+ /*
+ * The head of the list. Points back to itself if empty:
+ */
+ struct robust_list list;
+
+ /*
+ * This relative offset is set by user-space, it gives the kernel
+ * the relative position of the futex field to examine. This way
+ * we keep userspace flexible, to freely shape its data-structure,
+ * without hardcoding any particular offset into the kernel:
+ */
+ long futex_offset;
+
+ /*
+ * The death of the thread may race with userspace setting
+ * up a lock's links. So to handle this race, userspace first
+ * sets this field to the address of the to-be-taken lock,
+ * then does the lock acquire, and then adds itself to the
+ * list, and then clears this field. Hence the kernel will
+ * always have full knowledge of all locks that the thread
+ * _might_ have taken. We check the owner TID in any case,
+ * so only truly owned locks will be handled.
+ */
+ struct robust_list __user *list_op_pending;
+};
+
+/*
+ * Are there any waiters for this robust futex:
+ */
+#define FUTEX_WAITERS 0x80000000
+
+/*
+ * The kernel signals via this bit that a thread holding a futex
+ * has exited without unlocking the futex. The kernel also does
+ * a FUTEX_WAKE on such futexes, after setting the bit, to wake
+ * up any possible waiters:
+ */
+#define FUTEX_OWNER_DIED 0x40000000
+
+/*
+ * The rest of the robust-futex field is for the TID:
+ */
+#define FUTEX_TID_MASK 0x3fffffff
+
+/*
+ * This limit protects against a deliberately circular list.
+ * (Not worth introducing an rlimit for it)
+ */
+#define ROBUST_LIST_LIMIT 2048
+
long do_futex(unsigned long uaddr, int op, int val,
unsigned long timeout, unsigned long uaddr2, int val2,
int val3);
+extern int handle_futex_death(u32 __user *uaddr, struct task_struct *curr);
+
+#ifdef CONFIG_FUTEX
+extern void exit_robust_list(struct task_struct *curr);
+#else
+static inline void exit_robust_list(struct task_struct *curr)
+{
+}
+#endif
+
#define FUTEX_OP_SET 0 /* *(int *)UADDR2 = OPARG; */
#define FUTEX_OP_ADD 1 /* *(int *)UADDR2 += OPARG; */
#define FUTEX_OP_OR 2 /* *(int *)UADDR2 |= OPARG; */
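
The comments above describe the robust-futex ABI from the kernel's side. A hedged userspace sketch of the other half: registering a per-thread list head and reading the status bits of a futex word after its owner died. The raw syscall assumes the C library exposes __NR_set_robust_list (glibc's NPTL normally performs this registration itself), and the lock type is hypothetical:

#include <stdio.h>
#include <stddef.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>

struct my_mutex {
	struct robust_list list;	/* links held locks together */
	int futex;			/* owner TID plus status bits */
};

static struct robust_list_head head = {
	.list		 = { &head.list },	/* empty list points to itself */
	.futex_offset	 = offsetof(struct my_mutex, futex) -
			   offsetof(struct my_mutex, list),
	.list_op_pending = NULL,
};

int main(void)
{
	if (syscall(__NR_set_robust_list, &head, sizeof(head)) != 0)
		perror("set_robust_list");

	/*
	 * If the owning thread exits while holding a lock, the kernel
	 * ORs FUTEX_OWNER_DIED into the futex word and wakes a waiter;
	 * the owner TID stays in the low FUTEX_TID_MASK bits.
	 */
	int word = 1234 | FUTEX_OWNER_DIED;
	printf("owner tid %d, owner died: %d\n",
	       word & FUTEX_TID_MASK, !!(word & FUTEX_OWNER_DIED));
	return 0;
}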
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 179fea53fc81..3c1b0294a742 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -78,6 +78,7 @@ struct hd_struct {
sector_t start_sect;
sector_t nr_sects;
struct kobject kobj;
+ struct kobject *holder_dir;
unsigned ios[2], sectors[2]; /* READs and WRITEs */
int policy, partno;
};
@@ -114,6 +115,8 @@ struct gendisk {
int number; /* more of the same */
struct device *driverfs_dev;
struct kobject kobj;
+ struct kobject *holder_dir;
+ struct kobject *slave_dir;
struct timer_rand_state *random;
int policy;
diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h
index 679b46a6a565..c8b81f419fd8 100644
--- a/include/linux/i2c-id.h
+++ b/include/linux/i2c-id.h
@@ -108,6 +108,10 @@
#define I2C_DRIVERID_UPD64083 78 /* upd64083 video processor */
#define I2C_DRIVERID_UPD64031A 79 /* upd64031a video processor */
#define I2C_DRIVERID_SAA717X 80 /* saa717x video encoder */
+#define I2C_DRIVERID_DS1672 81 /* Dallas/Maxim DS1672 RTC */
+#define I2C_DRIVERID_X1205 82 /* Xicor/Intersil X1205 RTC */
+#define I2C_DRIVERID_PCF8563 83 /* Philips PCF8563 RTC */
+#define I2C_DRIVERID_RS5C372 84 /* Ricoh RS5C372 RTC */
#define I2C_DRIVERID_I2CDEV 900
#define I2C_DRIVERID_ARP 902 /* SMBus ARP Client */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 03d6cfaa5b8a..a3720f973ea5 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -87,7 +87,7 @@ extern int cond_resched(void);
(__x < 0) ? -__x : __x; \
})
-extern struct notifier_block *panic_notifier_list;
+extern struct atomic_notifier_head panic_notifier_list;
extern long (*panic_blink)(long time);
NORET_TYPE void panic(const char * fmt, ...)
__attribute__ ((NORET_AND format (printf, 1, 2)));
diff --git a/include/linux/m48t86.h b/include/linux/m48t86.h
new file mode 100644
index 000000000000..9065199319d0
--- /dev/null
+++ b/include/linux/m48t86.h
@@ -0,0 +1,16 @@
+/*
+ * ST M48T86 / Dallas DS12887 RTC driver
+ * Copyright (c) 2006 Tower Technologies
+ *
+ * Author: Alessandro Zummo <a.zummo@towertech.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+struct m48t86_ops
+{
+ void (*writeb)(unsigned char value, unsigned long addr);
+ unsigned char (*readb)(unsigned long addr);
+};
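
The driver expects platform code to supply these two bus accessors. A hedged board-file sketch; the register addresses (assumed to be statically mapped) and the "rtc-m48t86" platform device name are illustrative assumptions:

#include <linux/platform_device.h>
#include <linux/m48t86.h>
#include <asm/io.h>

#define BOARD_RTC_INDEX	((void __iomem *)0xf0000000)	/* hypothetical index register */
#define BOARD_RTC_DATA	((void __iomem *)0xf0000004)	/* hypothetical data register */

static void board_rtc_writeb(unsigned char value, unsigned long addr)
{
	writeb(addr, BOARD_RTC_INDEX);	/* select register */
	writeb(value, BOARD_RTC_DATA);
}

static unsigned char board_rtc_readb(unsigned long addr)
{
	writeb(addr, BOARD_RTC_INDEX);	/* select register */
	return readb(BOARD_RTC_DATA);
}

static struct m48t86_ops board_rtc_ops = {
	.writeb	= board_rtc_writeb,
	.readb	= board_rtc_readb,
};

static struct platform_device board_rtc_device = {
	.name	= "rtc-m48t86",
	.id	= -1,
	.dev	= {
		.platform_data = &board_rtc_ops,
	},
};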
diff --git a/include/linux/memory.h b/include/linux/memory.h
index e251dc43d0f5..8f04143ca363 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -77,7 +77,6 @@ extern int remove_memory_block(unsigned long, struct mem_section *, int);
#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT)
-struct notifier_block;
#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ebfc238cc243..b5c21122c299 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -13,6 +13,7 @@
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
+#include <linux/nodemask.h>
#include <asm/atomic.h>
/* Free memory management - zoned buddy allocator. */
@@ -225,7 +226,6 @@ struct zone {
* Discontig memory support fields.
*/
struct pglist_data *zone_pgdat;
- struct page *zone_mem_map;
/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
unsigned long zone_start_pfn;
@@ -307,7 +307,6 @@ typedef struct pglist_data {
unsigned long node_spanned_pages; /* total size of physical page
range, including holes */
int node_id;
- struct pglist_data *pgdat_next;
wait_queue_head_t kswapd_wait;
struct task_struct *kswapd;
int kswapd_max_order;
@@ -324,8 +323,6 @@ typedef struct pglist_data {
#include <linux/memory_hotplug.h>
-extern struct pglist_data *pgdat_list;
-
void __get_zone_counts(unsigned long *active, unsigned long *inactive,
unsigned long *free, struct pglist_data *pgdat);
void get_zone_counts(unsigned long *active, unsigned long *inactive,
@@ -350,57 +347,6 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
*/
#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
-/**
- * for_each_pgdat - helper macro to iterate over all nodes
- * @pgdat - pointer to a pg_data_t variable
- *
- * Meant to help with common loops of the form
- * pgdat = pgdat_list;
- * while(pgdat) {
- * ...
- * pgdat = pgdat->pgdat_next;
- * }
- */
-#define for_each_pgdat(pgdat) \
- for (pgdat = pgdat_list; pgdat; pgdat = pgdat->pgdat_next)
-
-/*
- * next_zone - helper magic for for_each_zone()
- * Thanks to William Lee Irwin III for this piece of ingenuity.
- */
-static inline struct zone *next_zone(struct zone *zone)
-{
- pg_data_t *pgdat = zone->zone_pgdat;
-
- if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
- zone++;
- else if (pgdat->pgdat_next) {
- pgdat = pgdat->pgdat_next;
- zone = pgdat->node_zones;
- } else
- zone = NULL;
-
- return zone;
-}
-
-/**
- * for_each_zone - helper macro to iterate over all memory zones
- * @zone - pointer to struct zone variable
- *
- * The user only needs to declare the zone variable, for_each_zone
- * fills it in. This basically means for_each_zone() is an
- * easier to read version of this piece of code:
- *
- * for (pgdat = pgdat_list; pgdat; pgdat = pgdat->node_next)
- * for (i = 0; i < MAX_NR_ZONES; ++i) {
- * struct zone * z = pgdat->node_zones + i;
- * ...
- * }
- * }
- */
-#define for_each_zone(zone) \
- for (zone = pgdat_list->node_zones; zone; zone = next_zone(zone))
-
static inline int populated_zone(struct zone *zone)
{
return (!!zone->present_pages);
@@ -472,6 +418,30 @@ extern struct pglist_data contig_page_data;
#endif /* !CONFIG_NEED_MULTIPLE_NODES */
+extern struct pglist_data *first_online_pgdat(void);
+extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
+extern struct zone *next_zone(struct zone *zone);
+
+/**
+ * for_each_pgdat - helper macro to iterate over all nodes
+ * @pgdat - pointer to a pg_data_t variable
+ */
+#define for_each_online_pgdat(pgdat) \
+ for (pgdat = first_online_pgdat(); \
+ pgdat; \
+ pgdat = next_online_pgdat(pgdat))
+/**
+ * for_each_zone - helper macro to iterate over all memory zones
+ * @zone - pointer to struct zone variable
+ *
+ * The user only needs to declare the zone variable, for_each_zone
+ * fills it in.
+ */
+#define for_each_zone(zone) \
+ for (zone = (first_online_pgdat())->node_zones; \
+ zone; \
+ zone = next_zone(zone))
+
#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif
@@ -602,17 +572,6 @@ static inline struct mem_section *__pfn_to_section(unsigned long pfn)
return __nr_to_section(pfn_to_section_nr(pfn));
}
-#define pfn_to_page(pfn) \
-({ \
- unsigned long __pfn = (pfn); \
- __section_mem_map_addr(__pfn_to_section(__pfn)) + __pfn; \
-})
-#define page_to_pfn(page) \
-({ \
- page - __section_mem_map_addr(__nr_to_section( \
- page_to_section(page))); \
-})
-
static inline int pfn_valid(unsigned long pfn)
{
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
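
With pgdat_list and the open-coded next_zone() removed, node and zone walks go through first_online_pgdat()/next_online_pgdat() via the iterator macros above. A minimal kernel-style sketch (the function itself is hypothetical):

#include <linux/mmzone.h>

static unsigned long count_present_pages(unsigned int *populated_zones)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	unsigned long pages = 0;
	unsigned int nzones = 0;

	/* visit only nodes that are currently online */
	for_each_online_pgdat(pgdat)
		pages += pgdat->node_present_pages;

	/* or visit every zone of every online node */
	for_each_zone(zone)
		if (populated_zone(zone))
			nzones++;

	*populated_zones = nzones;
	return pages;
}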
diff --git a/include/linux/netfilter_ipv4/ip_conntrack.h b/include/linux/netfilter_ipv4/ip_conntrack.h
index f32d75c4f4cf..d54d7b278e96 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack.h
@@ -308,29 +308,30 @@ DECLARE_PER_CPU(struct ip_conntrack_ecache, ip_conntrack_ecache);
#define CONNTRACK_ECACHE(x) (__get_cpu_var(ip_conntrack_ecache).x)
-extern struct notifier_block *ip_conntrack_chain;
-extern struct notifier_block *ip_conntrack_expect_chain;
+extern struct atomic_notifier_head ip_conntrack_chain;
+extern struct atomic_notifier_head ip_conntrack_expect_chain;
static inline int ip_conntrack_register_notifier(struct notifier_block *nb)
{
- return notifier_chain_register(&ip_conntrack_chain, nb);
+ return atomic_notifier_chain_register(&ip_conntrack_chain, nb);
}
static inline int ip_conntrack_unregister_notifier(struct notifier_block *nb)
{
- return notifier_chain_unregister(&ip_conntrack_chain, nb);
+ return atomic_notifier_chain_unregister(&ip_conntrack_chain, nb);
}
static inline int
ip_conntrack_expect_register_notifier(struct notifier_block *nb)
{
- return notifier_chain_register(&ip_conntrack_expect_chain, nb);
+ return atomic_notifier_chain_register(&ip_conntrack_expect_chain, nb);
}
static inline int
ip_conntrack_expect_unregister_notifier(struct notifier_block *nb)
{
- return notifier_chain_unregister(&ip_conntrack_expect_chain, nb);
+ return atomic_notifier_chain_unregister(&ip_conntrack_expect_chain,
+ nb);
}
extern void ip_ct_deliver_cached_events(const struct ip_conntrack *ct);
@@ -355,14 +356,14 @@ static inline void ip_conntrack_event(enum ip_conntrack_events event,
struct ip_conntrack *ct)
{
if (is_confirmed(ct) && !is_dying(ct))
- notifier_call_chain(&ip_conntrack_chain, event, ct);
+ atomic_notifier_call_chain(&ip_conntrack_chain, event, ct);
}
static inline void
ip_conntrack_expect_event(enum ip_conntrack_expect_events event,
struct ip_conntrack_expect *exp)
{
- notifier_call_chain(&ip_conntrack_expect_chain, event, exp);
+ atomic_notifier_call_chain(&ip_conntrack_expect_chain, event, exp);
}
#else /* CONFIG_IP_NF_CONNTRACK_EVENTS */
static inline void ip_conntrack_event_cache(enum ip_conntrack_events event,
diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h
index 6bad4766d3d9..d2a8abb5011a 100644
--- a/include/linux/nfsd/export.h
+++ b/include/linux/nfsd/export.h
@@ -67,7 +67,8 @@ struct svc_expkey {
int ek_fsidtype;
u32 ek_fsid[3];
- struct svc_export * ek_export;
+ struct vfsmount * ek_mnt;
+ struct dentry * ek_dentry;
};
#define EX_SECURE(exp) (!((exp)->ex_flags & NFSEXP_INSECURE_PORT))
@@ -85,9 +86,6 @@ void nfsd_export_shutdown(void);
void nfsd_export_flush(void);
void exp_readlock(void);
void exp_readunlock(void);
-struct svc_expkey * exp_find_key(struct auth_domain *clp,
- int fsid_type, u32 *fsidv,
- struct cache_req *reqp);
struct svc_export * exp_get_by_name(struct auth_domain *clp,
struct vfsmount *mnt,
struct dentry *dentry,
@@ -101,35 +99,20 @@ int exp_rootfh(struct auth_domain *,
int exp_pseudoroot(struct auth_domain *, struct svc_fh *fhp, struct cache_req *creq);
int nfserrno(int errno);
-extern void expkey_put(struct cache_head *item, struct cache_detail *cd);
-extern void svc_export_put(struct cache_head *item, struct cache_detail *cd);
-extern struct cache_detail svc_export_cache, svc_expkey_cache;
+extern struct cache_detail svc_export_cache;
static inline void exp_put(struct svc_export *exp)
{
- svc_export_put(&exp->h, &svc_export_cache);
+ cache_put(&exp->h, &svc_export_cache);
}
static inline void exp_get(struct svc_export *exp)
{
cache_get(&exp->h);
}
-static inline struct svc_export *
+extern struct svc_export *
exp_find(struct auth_domain *clp, int fsid_type, u32 *fsidv,
- struct cache_req *reqp)
-{
- struct svc_expkey *ek = exp_find_key(clp, fsid_type, fsidv, reqp);
- if (ek && !IS_ERR(ek)) {
- struct svc_export *exp = ek->ek_export;
- int err;
- exp_get(exp);
- expkey_put(&ek->h, &svc_expkey_cache);
- if ((err = cache_check(&svc_export_cache, &exp->h, reqp)))
- exp = ERR_PTR(err);
- return exp;
- } else
- return ERR_PTR(PTR_ERR(ek));
-}
+ struct cache_req *reqp);
#endif /* __KERNEL__ */
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index b959a4525cbd..1a9ef3e627d1 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -350,11 +350,15 @@ extern nodemask_t node_possible_map;
#define num_possible_nodes() nodes_weight(node_possible_map)
#define node_online(node) node_isset((node), node_online_map)
#define node_possible(node) node_isset((node), node_possible_map)
+#define first_online_node first_node(node_online_map)
+#define next_online_node(nid) next_node((nid), node_online_map)
#else
#define num_online_nodes() 1
#define num_possible_nodes() 1
#define node_online(node) ((node) == 0)
#define node_possible(node) ((node) == 0)
+#define first_online_node 0
+#define next_online_node(nid) (MAX_NUMNODES)
#endif
#define any_online_node(mask) \
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 5937dd6053c3..51dbab9710c7 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -10,25 +10,107 @@
#ifndef _LINUX_NOTIFIER_H
#define _LINUX_NOTIFIER_H
#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
-struct notifier_block
-{
- int (*notifier_call)(struct notifier_block *self, unsigned long, void *);
+/*
+ * Notifier chains are of three types:
+ *
+ * Atomic notifier chains: Chain callbacks run in interrupt/atomic
+ * context. Callouts are not allowed to block.
+ * Blocking notifier chains: Chain callbacks run in process context.
+ * Callouts are allowed to block.
+ * Raw notifier chains: There are no restrictions on callbacks,
+ * registration, or unregistration. All locking and protection
+ * must be provided by the caller.
+ *
+ * atomic_notifier_chain_register() may be called from an atomic context,
+ * but blocking_notifier_chain_register() must be called from a process
+ * context. Ditto for the corresponding _unregister() routines.
+ *
+ * atomic_notifier_chain_unregister() and blocking_notifier_chain_unregister()
+ * _must not_ be called from within the call chain.
+ */
+
+struct notifier_block {
+ int (*notifier_call)(struct notifier_block *, unsigned long, void *);
struct notifier_block *next;
int priority;
};
+struct atomic_notifier_head {
+ spinlock_t lock;
+ struct notifier_block *head;
+};
+
+struct blocking_notifier_head {
+ struct rw_semaphore rwsem;
+ struct notifier_block *head;
+};
+
+struct raw_notifier_head {
+ struct notifier_block *head;
+};
+
+#define ATOMIC_INIT_NOTIFIER_HEAD(name) do { \
+ spin_lock_init(&(name)->lock); \
+ (name)->head = NULL; \
+ } while (0)
+#define BLOCKING_INIT_NOTIFIER_HEAD(name) do { \
+ init_rwsem(&(name)->rwsem); \
+ (name)->head = NULL; \
+ } while (0)
+#define RAW_INIT_NOTIFIER_HEAD(name) do { \
+ (name)->head = NULL; \
+ } while (0)
+
+#define ATOMIC_NOTIFIER_INIT(name) { \
+ .lock = SPIN_LOCK_UNLOCKED, \
+ .head = NULL }
+#define BLOCKING_NOTIFIER_INIT(name) { \
+ .rwsem = __RWSEM_INITIALIZER((name).rwsem), \
+ .head = NULL }
+#define RAW_NOTIFIER_INIT(name) { \
+ .head = NULL }
+
+#define ATOMIC_NOTIFIER_HEAD(name) \
+ struct atomic_notifier_head name = \
+ ATOMIC_NOTIFIER_INIT(name)
+#define BLOCKING_NOTIFIER_HEAD(name) \
+ struct blocking_notifier_head name = \
+ BLOCKING_NOTIFIER_INIT(name)
+#define RAW_NOTIFIER_HEAD(name) \
+ struct raw_notifier_head name = \
+ RAW_NOTIFIER_INIT(name)
#ifdef __KERNEL__
-extern int notifier_chain_register(struct notifier_block **list, struct notifier_block *n);
-extern int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n);
-extern int notifier_call_chain(struct notifier_block **n, unsigned long val, void *v);
+extern int atomic_notifier_chain_register(struct atomic_notifier_head *,
+ struct notifier_block *);
+extern int blocking_notifier_chain_register(struct blocking_notifier_head *,
+ struct notifier_block *);
+extern int raw_notifier_chain_register(struct raw_notifier_head *,
+ struct notifier_block *);
+
+extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *,
+ struct notifier_block *);
+extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *,
+ struct notifier_block *);
+extern int raw_notifier_chain_unregister(struct raw_notifier_head *,
+ struct notifier_block *);
+
+extern int atomic_notifier_call_chain(struct atomic_notifier_head *,
+ unsigned long val, void *v);
+extern int blocking_notifier_call_chain(struct blocking_notifier_head *,
+ unsigned long val, void *v);
+extern int raw_notifier_call_chain(struct raw_notifier_head *,
+ unsigned long val, void *v);
#define NOTIFY_DONE 0x0000 /* Don't care */
#define NOTIFY_OK 0x0001 /* Suits me */
#define NOTIFY_STOP_MASK 0x8000 /* Don't call further */
-#define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002) /* Bad/Veto action */
+#define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002)
+ /* Bad/Veto action */
/*
* Clean way to return from the notifier and stop further calls.
*/
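
The three chain flavours above replace the old untyped struct notifier_block * list heads. A minimal sketch of declaring and using a blocking chain; the chain, callback, and event value are hypothetical, the calls are the ones declared above:

#include <linux/notifier.h>

static BLOCKING_NOTIFIER_HEAD(my_event_chain);

static int my_event_callback(struct notifier_block *nb,
			     unsigned long event, void *data)
{
	/* blocking chain: runs in process context and may sleep */
	return NOTIFY_OK;
}

static struct notifier_block my_event_nb = {
	.notifier_call	= my_event_callback,
	.priority	= 0,
};

static int my_subsystem_init(void)
{
	return blocking_notifier_chain_register(&my_event_chain,
						&my_event_nb);
}

static void my_subsystem_notify(void *data)
{
	/* callers use the _call_chain variant matching the head type */
	blocking_notifier_call_chain(&my_event_chain, 1, data);
}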
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 6f080ae59286..02f6cf20b141 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1052,6 +1052,7 @@
#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6600_ALT2 0x00f2
#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6200_ALT1 0x00f3
#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_GT 0x00f9
+#define PCIE_DEVICE_ID_NVIDIA_QUADRO_NVS280 0x00fd
#define PCI_DEVICE_ID_NVIDIA_GEFORCE_SDR 0x0100
#define PCI_DEVICE_ID_NVIDIA_GEFORCE_DDR 0x0101
#define PCI_DEVICE_ID_NVIDIA_QUADRO 0x0103
diff --git a/include/linux/pfn.h b/include/linux/pfn.h
new file mode 100644
index 000000000000..bb01f8b92b56
--- /dev/null
+++ b/include/linux/pfn.h
@@ -0,0 +1,9 @@
+#ifndef _LINUX_PFN_H_
+#define _LINUX_PFN_H_
+
+#define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
+#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
+#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
+#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
+
+#endif
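
These helpers convert byte addresses and sizes to page-frame numbers. A small hedged example (the function and message text are illustrative):

#include <linux/kernel.h>
#include <linux/pfn.h>
#include <asm/page.h>

static void report_whole_frames(unsigned long phys_start,
				unsigned long phys_end)
{
	unsigned long start_pfn = PFN_UP(phys_start);	/* round start up */
	unsigned long end_pfn = PFN_DOWN(phys_end);	/* round end down */

	if (start_pfn < end_pfn)
		printk("frames %lu-%lu usable, %lu bytes\n",
		       start_pfn, end_pfn - 1,
		       PFN_PHYS(end_pfn - start_pfn));
}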
diff --git a/include/linux/raid/md.h b/include/linux/raid/md.h
index b6e0bcad84e1..66b44e5e0d6e 100644
--- a/include/linux/raid/md.h
+++ b/include/linux/raid/md.h
@@ -92,7 +92,10 @@ extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
extern void md_super_wait(mddev_t *mddev);
extern int sync_page_io(struct block_device *bdev, sector_t sector, int size,
struct page *page, int rw);
+extern void md_do_sync(mddev_t *mddev);
+extern void md_new_event(mddev_t *mddev);
+extern void md_update_sb(mddev_t * mddev);
#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
index 617b9506c760..e2df61f5b09a 100644
--- a/include/linux/raid/md_k.h
+++ b/include/linux/raid/md_k.h
@@ -132,6 +132,14 @@ struct mddev_s
char uuid[16];
+ /* If the array is being reshaped, we need to record the
+ * new shape and an indication of where we are up to.
+ * This is written to the superblock.
+ * If reshape_position is MaxSector, then no reshape is happening (yet).
+ */
+ sector_t reshape_position;
+ int delta_disks, new_level, new_layout, new_chunk;
+
struct mdk_thread_s *thread; /* management thread */
struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */
sector_t curr_resync; /* blocks scheduled */
@@ -143,6 +151,10 @@ struct mddev_s
sector_t resync_mismatches; /* count of sectors where
* parity/replica mismatch found
*/
+
+ /* allow user-space to request suspension of IO to regions of the array */
+ sector_t suspend_lo;
+ sector_t suspend_hi;
/* if zero, use the system-wide default */
int sync_speed_min;
int sync_speed_max;
@@ -157,6 +169,9 @@ struct mddev_s
* DONE: thread is done and is waiting to be reaped
* REQUEST: user-space has requested a sync (used with SYNC)
* CHECK: user-space request for check-only, no repair
+ * RESHAPE: A reshape is happening
+ *
+ * If neither SYNC nor RESHAPE is set, then it is a recovery.
*/
#define MD_RECOVERY_RUNNING 0
#define MD_RECOVERY_SYNC 1
@@ -166,10 +181,11 @@ struct mddev_s
#define MD_RECOVERY_NEEDED 5
#define MD_RECOVERY_REQUESTED 6
#define MD_RECOVERY_CHECK 7
+#define MD_RECOVERY_RESHAPE 8
unsigned long recovery;
int in_sync; /* know to not need resync */
- struct semaphore reconfig_sem;
+ struct mutex reconfig_mutex;
atomic_t active;
int changed; /* true if we might need to reread partition info */
@@ -249,7 +265,8 @@ struct mdk_personality
int (*spare_active) (mddev_t *mddev);
sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster);
int (*resize) (mddev_t *mddev, sector_t sectors);
- int (*reshape) (mddev_t *mddev, int raid_disks);
+ int (*check_reshape) (mddev_t *mddev);
+ int (*start_reshape) (mddev_t *mddev);
int (*reconfig) (mddev_t *mddev, int layout, int chunk_size);
/* quiesce moves between quiescence states
* 0 - fully active
diff --git a/include/linux/raid/md_p.h b/include/linux/raid/md_p.h
index c100fa5d4bfa..774e1acfb8c4 100644
--- a/include/linux/raid/md_p.h
+++ b/include/linux/raid/md_p.h
@@ -102,6 +102,18 @@ typedef struct mdp_device_descriptor_s {
#define MD_SB_ERRORS 1
#define MD_SB_BITMAP_PRESENT 8 /* bitmap may be present nearby */
+
+/*
+ * Notes:
+ * - if an array is being reshaped (restriped) in order to change
+ * the number of active devices in the array, 'raid_disks' will be
+ * the larger of the old and new numbers. 'delta_disks' will
+ * be the "new - old". So if +ve, raid_disks is the new value, and
+ * "raid_disks-delta_disks" is the old. If -ve, raid_disks is the
+ * old value and "raid_disks+delta_disks" is the new (smaller) value.
+ */
+
+
typedef struct mdp_superblock_s {
/*
* Constant generic information
@@ -146,7 +158,13 @@ typedef struct mdp_superblock_s {
__u32 cp_events_hi; /* 10 high-order of checkpoint update count */
#endif
__u32 recovery_cp; /* 11 recovery checkpoint sector count */
- __u32 gstate_sreserved[MD_SB_GENERIC_STATE_WORDS - 12];
+ /* These are only valid for minor_version > 90 */
+ __u64 reshape_position; /* 12,13 next address in array-space for reshape */
+ __u32 new_level; /* 14 new level we are reshaping to */
+ __u32 delta_disks; /* 15 change in number of raid_disks */
+ __u32 new_layout; /* 16 new layout */
+ __u32 new_chunk; /* 17 new chunk size (bytes) */
+ __u32 gstate_sreserved[MD_SB_GENERIC_STATE_WORDS - 18];
/*
* Personality information
@@ -207,7 +225,14 @@ struct mdp_superblock_1 {
* NOTE: signed, so bitmap can be before superblock
* only meaningful if feature_map[0] is set.
*/
- __u8 pad1[128-100]; /* set to 0 when written */
+
+ /* These are only valid with feature bit '4' */
+ __u64 reshape_position; /* next address in array-space for reshape */
+ __u32 new_level; /* new level we are reshaping to */
+ __u32 delta_disks; /* change in number of raid_disks */
+ __u32 new_layout; /* new layout */
+ __u32 new_chunk; /* new chunk size (bytes) */
+ __u8 pad1[128-124]; /* set to 0 when written */
/* constant this-device information - 64 bytes */
__u64 data_offset; /* sector start of data, often 0 */
@@ -240,8 +265,9 @@ struct mdp_superblock_1 {
/* feature_map bits */
#define MD_FEATURE_BITMAP_OFFSET 1
+#define MD_FEATURE_RESHAPE_ACTIVE 4
-#define MD_FEATURE_ALL 1
+#define MD_FEATURE_ALL 5
#endif
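
The delta_disks note above implies a simple rule for recovering the old and new disk counts during a reshape. A small worked sketch (hypothetical helper, plain userspace C):

#include <stdio.h>

static void reshape_counts(unsigned int raid_disks, int delta_disks,
			   unsigned int *old_disks, unsigned int *new_disks)
{
	if (delta_disks >= 0) {		/* growing: raid_disks is the new count */
		*new_disks = raid_disks;
		*old_disks = raid_disks - delta_disks;
	} else {			/* shrinking: raid_disks is the old count */
		*old_disks = raid_disks;
		*new_disks = raid_disks + delta_disks;
	}
}

int main(void)
{
	unsigned int old, new;

	reshape_counts(5, 2, &old, &new);	/* a 3-disk array growing to 5 */
	printf("old=%u new=%u\n", old, new);
	return 0;
}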
diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h
index 394da8207b34..914af667044f 100644
--- a/include/linux/raid/raid5.h
+++ b/include/linux/raid/raid5.h
@@ -135,6 +135,7 @@ struct stripe_head {
atomic_t count; /* nr of active thread/requests */
spinlock_t lock;
int bm_seq; /* sequence number for bitmap flushes */
+ int disks; /* disks in stripe */
struct r5dev {
struct bio req;
struct bio_vec vec;
@@ -156,6 +157,7 @@ struct stripe_head {
#define R5_ReadError 8 /* seen a read error here recently */
#define R5_ReWrite 9 /* have tried to over-write the readerror */
+#define R5_Expanded 10 /* This block now has post-expand data */
/*
* Write method
*/
@@ -174,7 +176,9 @@ struct stripe_head {
#define STRIPE_DELAYED 6
#define STRIPE_DEGRADED 7
#define STRIPE_BIT_DELAY 8
-
+#define STRIPE_EXPANDING 9
+#define STRIPE_EXPAND_SOURCE 10
+#define STRIPE_EXPAND_READY 11
/*
* Plugging:
*
@@ -211,12 +215,24 @@ struct raid5_private_data {
int raid_disks, working_disks, failed_disks;
int max_nr_stripes;
+ /* used during an expand */
+ sector_t expand_progress; /* MaxSector when no expand happening */
+ sector_t expand_lo; /* from here up to expand_progress is out-of-bounds
+ * as we haven't flushed the metadata yet
+ */
+ int previous_raid_disks;
+
struct list_head handle_list; /* stripes needing handling */
struct list_head delayed_list; /* stripes that have plugged requests */
struct list_head bitmap_list; /* stripes delaying awaiting bitmap update */
atomic_t preread_active_stripes; /* stripes with scheduled io */
- char cache_name[20];
+ atomic_t reshape_stripes; /* stripes with pending writes for reshape */
+ /* unfortunately we need two cache names as we temporarily have
+ * two caches.
+ */
+ int active_name;
+ char cache_name[2][20];
kmem_cache_t *slab_cache; /* for allocating stripes */
int seq_flush, seq_write;
@@ -238,9 +254,10 @@ struct raid5_private_data {
wait_queue_head_t wait_for_overlap;
int inactive_blocked; /* release of inactive stripes blocked,
* waiting for 25% to be free
- */
+ */
+ int pool_size; /* number of disks in stripeheads in pool */
spinlock_t device_lock;
- struct disk_info disks[0];
+ struct disk_info *disks;
};
typedef struct raid5_private_data raid5_conf_t;
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index b739ac1f7ca0..ab61cd1199f2 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -91,10 +91,102 @@ struct rtc_pll_info {
#define RTC_PLL_GET _IOR('p', 0x11, struct rtc_pll_info) /* Get PLL correction */
#define RTC_PLL_SET _IOW('p', 0x12, struct rtc_pll_info) /* Set PLL correction */
+/* interrupt flags */
+#define RTC_IRQF 0x80 /* any of the following is active */
+#define RTC_PF 0x40
+#define RTC_AF 0x20
+#define RTC_UF 0x10
+
#ifdef __KERNEL__
#include <linux/interrupt.h>
+extern int rtc_month_days(unsigned int month, unsigned int year);
+extern int rtc_valid_tm(struct rtc_time *tm);
+extern int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time);
+extern void rtc_time_to_tm(unsigned long time, struct rtc_time *tm);
+
+#include <linux/device.h>
+#include <linux/seq_file.h>
+#include <linux/cdev.h>
+#include <linux/poll.h>
+#include <linux/mutex.h>
+
+extern struct class *rtc_class;
+
+struct rtc_class_ops {
+ int (*open)(struct device *);
+ void (*release)(struct device *);
+ int (*ioctl)(struct device *, unsigned int, unsigned long);
+ int (*read_time)(struct device *, struct rtc_time *);
+ int (*set_time)(struct device *, struct rtc_time *);
+ int (*read_alarm)(struct device *, struct rtc_wkalrm *);
+ int (*set_alarm)(struct device *, struct rtc_wkalrm *);
+ int (*proc)(struct device *, struct seq_file *);
+ int (*set_mmss)(struct device *, unsigned long secs);
+ int (*irq_set_state)(struct device *, int enabled);
+ int (*irq_set_freq)(struct device *, int freq);
+ int (*read_callback)(struct device *, int data);
+};
+
+#define RTC_DEVICE_NAME_SIZE 20
+struct rtc_task;
+
+struct rtc_device
+{
+ struct class_device class_dev;
+ struct module *owner;
+
+ int id;
+ char name[RTC_DEVICE_NAME_SIZE];
+
+ struct rtc_class_ops *ops;
+ struct mutex ops_lock;
+
+ struct class_device *rtc_dev;
+ struct cdev char_dev;
+ struct mutex char_lock;
+
+ unsigned long irq_data;
+ spinlock_t irq_lock;
+ wait_queue_head_t irq_queue;
+ struct fasync_struct *async_queue;
+
+ struct rtc_task *irq_task;
+ spinlock_t irq_task_lock;
+ int irq_freq;
+};
+#define to_rtc_device(d) container_of(d, struct rtc_device, class_dev)
+
+extern struct rtc_device *rtc_device_register(const char *name,
+ struct device *dev,
+ struct rtc_class_ops *ops,
+ struct module *owner);
+extern void rtc_device_unregister(struct rtc_device *rdev);
+extern int rtc_interface_register(struct class_interface *intf);
+
+extern int rtc_read_time(struct class_device *class_dev, struct rtc_time *tm);
+extern int rtc_set_time(struct class_device *class_dev, struct rtc_time *tm);
+extern int rtc_set_mmss(struct class_device *class_dev, unsigned long secs);
+extern int rtc_read_alarm(struct class_device *class_dev,
+ struct rtc_wkalrm *alrm);
+extern int rtc_set_alarm(struct class_device *class_dev,
+ struct rtc_wkalrm *alrm);
+extern void rtc_update_irq(struct class_device *class_dev,
+ unsigned long num, unsigned long events);
+
+extern struct class_device *rtc_class_open(char *name);
+extern void rtc_class_close(struct class_device *class_dev);
+
+extern int rtc_irq_register(struct class_device *class_dev,
+ struct rtc_task *task);
+extern void rtc_irq_unregister(struct class_device *class_dev,
+ struct rtc_task *task);
+extern int rtc_irq_set_state(struct class_device *class_dev,
+ struct rtc_task *task, int enabled);
+extern int rtc_irq_set_freq(struct class_device *class_dev,
+ struct rtc_task *task, int freq);
+
typedef struct rtc_task {
void (*func)(void *private_data);
void *private_data;
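
The block above is the driver-facing half of the new RTC class. A hedged skeleton showing only read_time/set_time and registration; the "foo" names, the hardware accessors, and the probe/remove wiring are illustrative:

#include <linux/module.h>
#include <linux/err.h>
#include <linux/rtc.h>

static int foo_read_time(struct device *dev, struct rtc_time *tm)
{
	unsigned long secs = 0;		/* would be read from the hardware */

	rtc_time_to_tm(secs, tm);
	return 0;
}

static int foo_set_time(struct device *dev, struct rtc_time *tm)
{
	unsigned long secs;
	int err = rtc_tm_to_time(tm, &secs);

	if (err)
		return err;
	/* ... program the hardware with secs ... */
	return 0;
}

static struct rtc_class_ops foo_rtc_ops = {
	.read_time	= foo_read_time,
	.set_time	= foo_set_time,
};

static struct rtc_device *foo_rtc;

static int foo_rtc_probe(struct device *dev)
{
	foo_rtc = rtc_device_register("foo-rtc", dev,
				      &foo_rtc_ops, THIS_MODULE);
	return IS_ERR(foo_rtc) ? PTR_ERR(foo_rtc) : 0;
}

static void foo_rtc_remove(struct device *dev)
{
	rtc_device_unregister(foo_rtc);
}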
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 036d14d2bf90..20b4f0372e44 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -35,6 +35,7 @@
#include <linux/topology.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
+#include <linux/futex.h>
#include <linux/auxvec.h> /* For AT_VECTOR_SIZE */
@@ -872,6 +873,11 @@ struct task_struct {
int cpuset_mems_generation;
int cpuset_mem_spread_rotor;
#endif
+ struct robust_list_head __user *robust_list;
+#ifdef CONFIG_COMPAT
+ struct compat_robust_list_head __user *compat_robust_list;
+#endif
+
atomic_t fs_excl; /* holding fs exclusive resources */
struct rcu_head rcu;
};
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index c4e3ea7cf154..b5612c958cce 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -50,7 +50,7 @@ struct cache_head {
time_t last_refresh; /* If CACHE_PENDING, this is when upcall
* was sent, else this is when update was received
*/
- atomic_t refcnt;
+ struct kref ref;
unsigned long flags;
};
#define CACHE_VALID 0 /* Entry contains valid data */
@@ -68,8 +68,7 @@ struct cache_detail {
atomic_t inuse; /* active user-space update or lookup */
char *name;
- void (*cache_put)(struct cache_head *,
- struct cache_detail*);
+ void (*cache_put)(struct kref *);
void (*cache_request)(struct cache_detail *cd,
struct cache_head *h,
@@ -81,6 +80,11 @@ struct cache_detail {
struct cache_detail *cd,
struct cache_head *h);
+ struct cache_head * (*alloc)(void);
+ int (*match)(struct cache_head *orig, struct cache_head *new);
+ void (*init)(struct cache_head *orig, struct cache_head *new);
+ void (*update)(struct cache_head *orig, struct cache_head *new);
+
/* fields below this comment are for internal use
* and should not be touched by cache owners
*/
@@ -123,126 +127,14 @@ struct cache_deferred_req {
int too_many);
};
-/*
- * just like a template in C++, this macro does cache lookup
- * for us.
- * The function is passed some sort of HANDLE from which a cache_detail
- * structure can be determined (via SETUP, DETAIL), a template
- * cache entry (type RTN*), and a "set" flag. Using the HASHFN and the
- * TEST, the function will try to find a matching cache entry in the cache.
- * If "set" == 0 :
- * If an entry is found, it is returned
- * If no entry is found, a new non-VALID entry is created.
- * If "set" == 1 and INPLACE == 0 :
- * If no entry is found a new one is inserted with data from "template"
- * If a non-CACHE_VALID entry is found, it is updated from template using UPDATE
- * If a CACHE_VALID entry is found, a new entry is swapped in with data
- * from "template"
- * If set == 1, and INPLACE == 1 :
- * As above, except that if a CACHE_VALID entry is found, we UPDATE in place
- * instead of swapping in a new entry.
- *
- * If the passed handle has the CACHE_NEGATIVE flag set, then UPDATE is not
- * run but insteead CACHE_NEGATIVE is set in any new item.
- * In any case, the new entry is returned with a reference count.
- *
- *
- * RTN is a struct type for a cache entry
- * MEMBER is the member of the cache which is cache_head, which must be first
- * FNAME is the name for the function
- * ARGS are arguments to function and must contain RTN *item, int set. May
- * also contain something to be usedby SETUP or DETAIL to find cache_detail.
- * SETUP locates the cache detail and makes it available as...
- * DETAIL identifies the cache detail, possibly set up by SETUP
- * HASHFN returns a hash value of the cache entry "item"
- * TEST tests if "tmp" matches "item"
- * INIT copies key information from "item" to "new"
- * UPDATE copies content information from "item" to "tmp"
- * INPLACE is true if updates can happen inplace rather than allocating a new structure
- *
- * WARNING: any substantial changes to this must be reflected in
- * net/sunrpc/svcauth.c(auth_domain_lookup)
- * which is a similar routine that is open-coded.
- */
-#define DefineCacheLookup(RTN,MEMBER,FNAME,ARGS,SETUP,DETAIL,HASHFN,TEST,INIT,UPDATE,INPLACE) \
-RTN *FNAME ARGS \
-{ \
- RTN *tmp, *new=NULL; \
- struct cache_head **hp, **head; \
- SETUP; \
- head = &(DETAIL)->hash_table[HASHFN]; \
- retry: \
- if (set||new) write_lock(&(DETAIL)->hash_lock); \
- else read_lock(&(DETAIL)->hash_lock); \
- for(hp=head; *hp != NULL; hp = &tmp->MEMBER.next) { \
- tmp = container_of(*hp, RTN, MEMBER); \
- if (TEST) { /* found a match */ \
- \
- if (set && !INPLACE && test_bit(CACHE_VALID, &tmp->MEMBER.flags) && !new) \
- break; \
- \
- if (new) \
- {INIT;} \
- if (set) { \
- if (!INPLACE && test_bit(CACHE_VALID, &tmp->MEMBER.flags))\
- { /* need to swap in new */ \
- RTN *t2; \
- \
- new->MEMBER.next = tmp->MEMBER.next; \
- *hp = &new->MEMBER; \
- tmp->MEMBER.next = NULL; \
- t2 = tmp; tmp = new; new = t2; \
- } \
- if (test_bit(CACHE_NEGATIVE, &item->MEMBER.flags)) \
- set_bit(CACHE_NEGATIVE, &tmp->MEMBER.flags); \
- else { \
- UPDATE; \
- clear_bit(CACHE_NEGATIVE, &tmp->MEMBER.flags); \
- } \
- } \
- cache_get(&tmp->MEMBER); \
- if (set||new) write_unlock(&(DETAIL)->hash_lock); \
- else read_unlock(&(DETAIL)->hash_lock); \
- if (set) \
- cache_fresh(DETAIL, &tmp->MEMBER, item->MEMBER.expiry_time); \
- if (set && !INPLACE && new) cache_fresh(DETAIL, &new->MEMBER, 0); \
- if (new) (DETAIL)->cache_put(&new->MEMBER, DETAIL); \
- return tmp; \
- } \
- } \
- /* Didn't find anything */ \
- if (new) { \
- INIT; \
- new->MEMBER.next = *head; \
- *head = &new->MEMBER; \
- (DETAIL)->entries ++; \
- cache_get(&new->MEMBER); \
- if (set) { \
- tmp = new; \
- if (test_bit(CACHE_NEGATIVE, &item->MEMBER.flags)) \
- set_bit(CACHE_NEGATIVE, &tmp->MEMBER.flags); \
- else {UPDATE;} \
- } \
- } \
- if (set||new) write_unlock(&(DETAIL)->hash_lock); \
- else read_unlock(&(DETAIL)->hash_lock); \
- if (new && set) \
- cache_fresh(DETAIL, &new->MEMBER, item->MEMBER.expiry_time); \
- if (new) \
- return new; \
- new = kmalloc(sizeof(*new), GFP_KERNEL); \
- if (new) { \
- cache_init(&new->MEMBER); \
- goto retry; \
- } \
- return NULL; \
-}
+extern struct cache_head *
+sunrpc_cache_lookup(struct cache_detail *detail,
+ struct cache_head *key, int hash);
+extern struct cache_head *
+sunrpc_cache_update(struct cache_detail *detail,
+ struct cache_head *new, struct cache_head *old, int hash);
-#define DefineSimpleCacheLookup(STRUCT,INPLACE) \
- DefineCacheLookup(struct STRUCT, h, STRUCT##_lookup, (struct STRUCT *item, int set), /*no setup */, \
- & STRUCT##_cache, STRUCT##_hash(item), STRUCT##_match(item, tmp),\
- STRUCT##_init(new, item), STRUCT##_update(tmp, item),INPLACE)
#define cache_for_each(pos, detail, index, member) \
for (({read_lock(&(detail)->hash_lock); index = (detail)->hash_size;}) ; \
@@ -258,22 +150,19 @@ extern void cache_clean_deferred(void *owner);
static inline struct cache_head *cache_get(struct cache_head *h)
{
- atomic_inc(&h->refcnt);
+ kref_get(&h->ref);
return h;
}
-static inline int cache_put(struct cache_head *h, struct cache_detail *cd)
+static inline void cache_put(struct cache_head *h, struct cache_detail *cd)
{
- if (atomic_read(&h->refcnt) <= 2 &&
+ if (atomic_read(&h->ref.refcount) <= 2 &&
h->expiry_time < cd->nextcheck)
cd->nextcheck = h->expiry_time;
- return atomic_dec_and_test(&h->refcnt);
+ kref_put(&h->ref, cd->cache_put);
}
-extern void cache_init(struct cache_head *h);
-extern void cache_fresh(struct cache_detail *detail,
- struct cache_head *head, time_t expiry);
extern int cache_check(struct cache_detail *detail,
struct cache_head *h, struct cache_req *rqstp);
extern void cache_flush(void);
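
With the DefineCacheLookup/DefineSimpleCacheLookup templates gone, a cache owner supplies alloc/match/init/update callbacks plus a kref release in its cache_detail and calls sunrpc_cache_lookup()/sunrpc_cache_update(). A hedged sketch of that wiring with a hypothetical item type (hash table setup and the upcall/parse methods are omitted):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sunrpc/cache.h>

struct mycache_item {
	struct cache_head	h;	/* must be embedded first */
	char			*key;
	int			value;
};

static struct cache_head *mycache_alloc(void)
{
	struct mycache_item *it = kmalloc(sizeof(*it), GFP_KERNEL);

	return it ? &it->h : NULL;
}

static int mycache_match(struct cache_head *a, struct cache_head *b)
{
	struct mycache_item *ia = container_of(a, struct mycache_item, h);
	struct mycache_item *ib = container_of(b, struct mycache_item, h);

	return strcmp(ia->key, ib->key) == 0;
}

static void mycache_init(struct cache_head *cnew, struct cache_head *ckey)
{
	struct mycache_item *new = container_of(cnew, struct mycache_item, h);
	struct mycache_item *key = container_of(ckey, struct mycache_item, h);

	new->key = kstrdup(key->key, GFP_KERNEL);	/* copy key fields */
}

static void mycache_update(struct cache_head *cnew, struct cache_head *citem)
{
	struct mycache_item *new = container_of(cnew, struct mycache_item, h);
	struct mycache_item *item = container_of(citem, struct mycache_item, h);

	new->value = item->value;			/* copy content fields */
}

static void mycache_put(struct kref *ref)
{
	struct mycache_item *it =
		container_of(ref, struct mycache_item, h.ref);

	kfree(it->key);
	kfree(it);
}

static struct cache_detail mycache_detail = {
	.name		= "mycache",
	.cache_put	= mycache_put,
	.alloc		= mycache_alloc,
	.match		= mycache_match,
	.init		= mycache_init,
	.update		= mycache_update,
	/* hash_size, hash_table, cache_request, cache_parse, ... omitted */
};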
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index c119ce7cbd22..2fe2087edd66 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -45,9 +45,10 @@ struct svc_rqst; /* forward decl */
* of ip addresses to the given client.
*/
struct auth_domain {
- struct cache_head h;
+ struct kref ref;
+ struct hlist_node hash;
char *name;
- int flavour;
+ struct auth_ops *flavour;
};
/*
@@ -86,6 +87,9 @@ struct auth_domain {
*
* domain_release()
* This call releases a domain.
+ * set_client()
+ * Given a pending request (struct svc_rqst), finds and assigns
+ * an appropriate 'auth_domain' as the client.
*/
struct auth_ops {
char * name;
@@ -117,7 +121,7 @@ extern void svc_auth_unregister(rpc_authflavor_t flavor);
extern struct auth_domain *unix_domain_find(char *name);
extern void auth_domain_put(struct auth_domain *item);
extern int auth_unix_add_addr(struct in_addr addr, struct auth_domain *dom);
-extern struct auth_domain *auth_domain_lookup(struct auth_domain *item, int set);
+extern struct auth_domain *auth_domain_lookup(char *name, struct auth_domain *new);
extern struct auth_domain *auth_domain_find(char *name);
extern struct auth_domain *auth_unix_lookup(struct in_addr addr);
extern int auth_unix_forget_old(struct auth_domain *dom);
@@ -160,8 +164,6 @@ static inline unsigned long hash_mem(char *buf, int length, int bits)
return hash >> (BITS_PER_LONG - bits);
}
-extern struct cache_detail auth_domain_cache, ip_map_cache;
-
#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_SVCAUTH_H_ */
diff --git a/include/linux/threads.h b/include/linux/threads.h
index b59738ac6197..e646bcdf2614 100644
--- a/include/linux/threads.h
+++ b/include/linux/threads.h
@@ -28,7 +28,8 @@
#define PID_MAX_DEFAULT (CONFIG_BASE_SMALL ? 0x1000 : 0x8000)
/*
- * A maximum of 4 million PIDs should be enough for a while:
+ * A maximum of 4 million PIDs should be enough for a while.
+ * [NOTE: PID/TIDs are limited to 2^29 ~= 500+ million, see futex.h.]
*/
#define PID_MAX_LIMIT (CONFIG_BASE_SMALL ? PAGE_SIZE * 8 : \
(sizeof(long) > 4 ? 4 * 1024 * 1024 : PID_MAX_DEFAULT))
diff --git a/include/linux/topology.h b/include/linux/topology.h
index e8eb0040ce3a..a305ae2e44b6 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -164,6 +164,15 @@
.nr_balance_failed = 0, \
}
+#ifdef CONFIG_SCHED_MC
+#ifndef SD_MC_INIT
+/* for now it's the same as SD_CPU_INIT.
+ * TBD: Tune Domain parameters!
+ */
+#define SD_MC_INIT SD_CPU_INIT
+#endif
+#endif
+
#ifdef CONFIG_NUMA
#ifndef SD_NODE_INIT
#error Please define an appropriate SD_NODE_INIT in include/asm/topology.h!!!
diff --git a/include/linux/x1205.h b/include/linux/x1205.h
deleted file mode 100644
index 64fd3af894a5..000000000000
--- a/include/linux/x1205.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * x1205.h - defines for drivers/i2c/chips/x1205.c
- * Copyright 2004 Karen Spearel
- * Copyright 2005 Alessandro Zummo
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __LINUX_X1205_H__
-#define __LINUX_X1205_H__
-
-/* commands */
-
-#define X1205_CMD_GETDATETIME 0
-#define X1205_CMD_SETTIME 1
-#define X1205_CMD_SETDATETIME 2
-#define X1205_CMD_GETALARM 3
-#define X1205_CMD_SETALARM 4
-#define X1205_CMD_GETDTRIM 5
-#define X1205_CMD_SETDTRIM 6
-#define X1205_CMD_GETATRIM 7
-#define X1205_CMD_SETATRIM 8
-
-extern int x1205_do_command(unsigned int cmd, void *arg);
-extern int x1205_direct_attach(int adapter_id,
- struct i2c_client_address_data *address_data);
-
-#endif /* __LINUX_X1205_H__ */