author    Trond Myklebust <Trond.Myklebust@netapp.com>  2006-06-20 08:59:45 -0400
committer Trond Myklebust <Trond.Myklebust@netapp.com>  2006-06-20 08:59:45 -0400
commit    d59bf96cdde5b874a57bfd1425faa45da915d0b7 (patch)
tree      351a40b72514d620e5bebea2de38c26f23277ffc /include/linux
parent    28df955a2ad484d602314b30183ea8496a9aa34a (diff)
parent    25f42b6af09e34c3f92107b36b5aa6edc2fdba2f (diff)
Merge branch 'master' of /home/trondmy/kernel/linux-2.6/
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/console.h                                          |   4
-rw-r--r--  include/linux/dmaengine.h                                        | 359
-rw-r--r--  include/linux/i2o.h                                              |   5
-rw-r--r--  include/linux/igmp.h                                             |   2
-rw-r--r--  include/linux/netdevice.h                                        |  45
-rw-r--r--  include/linux/netfilter/nf_conntrack_common.h                    |   4
-rw-r--r--  include/linux/netfilter/nfnetlink_conntrack.h                    |   4
-rw-r--r--  include/linux/netfilter/xt_CONNSECMARK.h                         |  13
-rw-r--r--  include/linux/netfilter/xt_SECMARK.h                             |  26
-rw-r--r--  include/linux/netfilter/xt_quota.h                               |  16
-rw-r--r--  include/linux/netfilter/xt_statistic.h                           |  32
-rw-r--r--  include/linux/netfilter_ipv4/ip_conntrack.h                      |   6
-rw-r--r--  include/linux/netfilter_ipv4/ip_conntrack_h323.h                 |   7
-rw-r--r--  include/linux/netfilter_ipv4/ip_conntrack_helper_h323_types.h   |   3
-rw-r--r--  include/linux/netfilter_ipv4/ip_conntrack_sip.h                  |  44
-rw-r--r--  include/linux/pci-acpi.h                                         |   2
-rw-r--r--  include/linux/pci.h                                              |   2
-rw-r--r--  include/linux/pci_ids.h                                          |  12
-rw-r--r--  include/linux/pfkeyv2.h                                          |   2
-rw-r--r--  include/linux/security.h                                         |  40
-rw-r--r--  include/linux/selinux.h                                          |  32
-rw-r--r--  include/linux/skbuff.h                                           |  74
-rw-r--r--  include/linux/sysctl.h                                           |   4
-rw-r--r--  include/linux/tcp.h                                              |   8
-rw-r--r--  include/linux/xfrm.h                                             |   4
25 files changed, 719 insertions(+), 31 deletions(-)
diff --git a/include/linux/console.h b/include/linux/console.h
index 721371382ae5..08734e660d41 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -117,6 +117,10 @@ extern void console_stop(struct console *);
extern void console_start(struct console *);
extern int is_console_locked(void);
+/* Suspend and resume console messages over PM events */
+extern void suspend_console(void);
+extern void resume_console(void);
+
/* Some debug stub to catch some of the obvious races in the VT code */
#if 1
#define WARN_CONSOLE_UNLOCKED() WARN_ON(!is_console_locked() && !oops_in_progress)
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
new file mode 100644
index 000000000000..78b236ca04f8
--- /dev/null
+++ b/include/linux/dmaengine.h
@@ -0,0 +1,359 @@
+/*
+ * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+#ifndef DMAENGINE_H
+#define DMAENGINE_H
+#include <linux/config.h>
+#ifdef CONFIG_DMA_ENGINE
+
+#include <linux/device.h>
+#include <linux/uio.h>
+#include <linux/kref.h>
+#include <linux/completion.h>
+#include <linux/rcupdate.h>
+
+/**
+ * enum dma_event - resource PNP/power management events
+ * @DMA_RESOURCE_SUSPEND: DMA device going into low power state
+ * @DMA_RESOURCE_RESUME: DMA device returning to full power
+ * @DMA_RESOURCE_ADDED: DMA device added to the system
+ * @DMA_RESOURCE_REMOVED: DMA device removed from the system
+ */
+enum dma_event {
+ DMA_RESOURCE_SUSPEND,
+ DMA_RESOURCE_RESUME,
+ DMA_RESOURCE_ADDED,
+ DMA_RESOURCE_REMOVED,
+};
+
+/**
+ * typedef dma_cookie_t
+ *
+ * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
+ */
+typedef s32 dma_cookie_t;
+
+#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)
+
+/**
+ * enum dma_status - DMA transaction status
+ * @DMA_SUCCESS: transaction completed successfully
+ * @DMA_IN_PROGRESS: transaction not yet processed
+ * @DMA_ERROR: transaction failed
+ */
+enum dma_status {
+ DMA_SUCCESS,
+ DMA_IN_PROGRESS,
+ DMA_ERROR,
+};
+
+/**
+ * struct dma_chan_percpu - the per-CPU part of struct dma_chan
+ * @refcount: local_t used for open-coded "bigref" counting
+ * @memcpy_count: transaction counter
+ * @bytes_transferred: byte counter
+ */
+
+struct dma_chan_percpu {
+ local_t refcount;
+ /* stats */
+ unsigned long memcpy_count;
+ unsigned long bytes_transferred;
+};
+
+/**
+ * struct dma_chan - devices supply DMA channels, clients use them
+ * @client: ptr to the client user of this chan, will be NULL when unused
+ * @device: ptr to the dma device that supplies this channel, always !NULL
+ * @cookie: last cookie value returned to client
+ * @chan_id: channel ID for sysfs
+ * @class_dev: class device for sysfs
+ * @refcount: kref, used in "bigref" slow-mode
+ * @slow_ref: indicates that the DMA channel is free
+ * @rcu: the DMA channel's RCU head
+ * @client_node: used to add this to the client chan list
+ * @device_node: used to add this to the device chan list
+ * @local: per-cpu pointer to a struct dma_chan_percpu
+ */
+struct dma_chan {
+ struct dma_client *client;
+ struct dma_device *device;
+ dma_cookie_t cookie;
+
+ /* sysfs */
+ int chan_id;
+ struct class_device class_dev;
+
+ struct kref refcount;
+ int slow_ref;
+ struct rcu_head rcu;
+
+ struct list_head client_node;
+ struct list_head device_node;
+ struct dma_chan_percpu *local;
+};
+
+void dma_chan_cleanup(struct kref *kref);
+
+static inline void dma_chan_get(struct dma_chan *chan)
+{
+ if (unlikely(chan->slow_ref))
+ kref_get(&chan->refcount);
+ else {
+ local_inc(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
+ put_cpu();
+ }
+}
+
+static inline void dma_chan_put(struct dma_chan *chan)
+{
+ if (unlikely(chan->slow_ref))
+ kref_put(&chan->refcount, dma_chan_cleanup);
+ else {
+ local_dec(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
+ put_cpu();
+ }
+}
+
+/*
+ * typedef dma_event_callback - function pointer to a DMA event callback
+ */
+typedef void (*dma_event_callback) (struct dma_client *client,
+ struct dma_chan *chan, enum dma_event event);
+
+/**
+ * struct dma_client - info on the entity making use of DMA services
+ * @event_callback: func ptr to call when something happens
+ * @chan_count: number of chans allocated
+ * @chans_desired: number of chans requested. Can be +/- chan_count
+ * @lock: protects access to the channels list
+ * @channels: the list of DMA channels allocated
+ * @global_node: list_head for global dma_client_list
+ */
+struct dma_client {
+ dma_event_callback event_callback;
+ unsigned int chan_count;
+ unsigned int chans_desired;
+
+ spinlock_t lock;
+ struct list_head channels;
+ struct list_head global_node;
+};
+
+/**
+ * struct dma_device - info on the entity supplying DMA services
+ * @chancnt: how many DMA channels are supported
+ * @channels: the list of struct dma_chan
+ * @global_node: list_head for global dma_device_list
+ * @refcount: reference count
+ * @done: completion used to wait for outstanding channel references
+ * @dev_id: unique device ID
+ * Other func ptrs: used to make use of this device's capabilities
+ */
+struct dma_device {
+
+ unsigned int chancnt;
+ struct list_head channels;
+ struct list_head global_node;
+
+ struct kref refcount;
+ struct completion done;
+
+ int dev_id;
+
+ int (*device_alloc_chan_resources)(struct dma_chan *chan);
+ void (*device_free_chan_resources)(struct dma_chan *chan);
+ dma_cookie_t (*device_memcpy_buf_to_buf)(struct dma_chan *chan,
+ void *dest, void *src, size_t len);
+ dma_cookie_t (*device_memcpy_buf_to_pg)(struct dma_chan *chan,
+ struct page *page, unsigned int offset, void *kdata,
+ size_t len);
+ dma_cookie_t (*device_memcpy_pg_to_pg)(struct dma_chan *chan,
+ struct page *dest_pg, unsigned int dest_off,
+ struct page *src_pg, unsigned int src_off, size_t len);
+ enum dma_status (*device_memcpy_complete)(struct dma_chan *chan,
+ dma_cookie_t cookie, dma_cookie_t *last,
+ dma_cookie_t *used);
+ void (*device_memcpy_issue_pending)(struct dma_chan *chan);
+};
+
+/* --- public DMA engine API --- */
+
+struct dma_client *dma_async_client_register(dma_event_callback event_callback);
+void dma_async_client_unregister(struct dma_client *client);
+void dma_async_client_chan_request(struct dma_client *client,
+ unsigned int number);
+
+/**
+ * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
+ * @chan: DMA channel to offload copy to
+ * @dest: destination address (virtual)
+ * @src: source address (virtual)
+ * @len: length
+ *
+ * Both @dest and @src must be mappable to a bus address according to the
+ * DMA mapping API rules for streaming mappings.
+ * Both @dest and @src must stay memory resident (kernel memory or locked
+ * user space pages)
+ */
+static inline dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
+ void *dest, void *src, size_t len)
+{
+ int cpu = get_cpu();
+ per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
+ per_cpu_ptr(chan->local, cpu)->memcpy_count++;
+ put_cpu();
+
+ return chan->device->device_memcpy_buf_to_buf(chan, dest, src, len);
+}
+
+/**
+ * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
+ * @chan: DMA channel to offload copy to
+ * @page: destination page
+ * @offset: offset in page to copy to
+ * @kdata: source address (virtual)
+ * @len: length
+ *
+ * Both @page/@offset and @kdata must be mappable to a bus address according
+ * to the DMA mapping API rules for streaming mappings.
+ * Both @page/@offset and @kdata must stay memory resident (kernel memory or
+ * locked user space pages)
+ */
+static inline dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
+ struct page *page, unsigned int offset, void *kdata, size_t len)
+{
+ int cpu = get_cpu();
+ per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
+ per_cpu_ptr(chan->local, cpu)->memcpy_count++;
+ put_cpu();
+
+ return chan->device->device_memcpy_buf_to_pg(chan, page, offset,
+ kdata, len);
+}
+
+/**
+ * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
+ * @chan: DMA channel to offload copy to
+ * @dest_pg: destination page
+ * @dest_off: offset in page to copy to
+ * @src_pg: source page
+ * @src_off: offset in page to copy from
+ * @len: length
+ *
+ * Both @dest_pg/@dest_off and @src_pg/@src_off must be mappable to a bus
+ * address according to the DMA mapping API rules for streaming mappings.
+ * Both @dest_pg/@dest_off and @src_pg/@src_off must stay memory resident
+ * (kernel memory or locked user space pages)
+ */
+static inline dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
+ struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
+ unsigned int src_off, size_t len)
+{
+ int cpu = get_cpu();
+ per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
+ per_cpu_ptr(chan->local, cpu)->memcpy_count++;
+ put_cpu();
+
+ return chan->device->device_memcpy_pg_to_pg(chan, dest_pg, dest_off,
+ src_pg, src_off, len);
+}
+
+/**
+ * dma_async_memcpy_issue_pending - flush pending copies to HW
+ * @chan: target DMA channel
+ *
+ * This allows drivers to push copies to HW in batches,
+ * reducing MMIO writes where possible.
+ */
+static inline void dma_async_memcpy_issue_pending(struct dma_chan *chan)
+{
+ return chan->device->device_memcpy_issue_pending(chan);
+}
+
+/**
+ * dma_async_memcpy_complete - poll for transaction completion
+ * @chan: DMA channel
+ * @cookie: transaction identifier to check status of
+ * @last: returns last completed cookie, can be NULL
+ * @used: returns last issued cookie, can be NULL
+ *
+ * If @last and @used are passed in, upon return they reflect the driver
+ * internal state and can be used with dma_async_is_complete() to check
+ * the status of multiple cookies without re-checking hardware state.
+ */
+static inline enum dma_status dma_async_memcpy_complete(struct dma_chan *chan,
+ dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
+{
+ return chan->device->device_memcpy_complete(chan, cookie, last, used);
+}
+
+/**
+ * dma_async_is_complete - test a cookie against chan state
+ * @cookie: transaction identifier to test status of
+ * @last_complete: last known completed transaction
+ * @last_used: last cookie value handed out
+ *
+ * dma_async_is_complete() is used in dma_async_memcpy_complete(); the test
+ * logic is separated out for lightweight testing of multiple cookies
+ */
+static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
+ dma_cookie_t last_complete, dma_cookie_t last_used)
+{
+ if (last_complete <= last_used) {
+ if ((cookie <= last_complete) || (cookie > last_used))
+ return DMA_SUCCESS;
+ } else {
+ if ((cookie <= last_complete) && (cookie > last_used))
+ return DMA_SUCCESS;
+ }
+ return DMA_IN_PROGRESS;
+}
+
+
+/* --- DMA device --- */
+
+int dma_async_device_register(struct dma_device *device);
+void dma_async_device_unregister(struct dma_device *device);
+
+/* --- Helper iov-locking functions --- */
+
+struct dma_page_list {
+ char *base_address;
+ int nr_pages;
+ struct page **pages;
+};
+
+struct dma_pinned_list {
+ int nr_iovecs;
+ struct dma_page_list page_list[0];
+};
+
+struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
+void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
+
+dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
+ struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
+dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
+ struct dma_pinned_list *pinned_list, struct page *page,
+ unsigned int offset, size_t len);
+
+#endif /* CONFIG_DMA_ENGINE */
+#endif /* DMAENGINE_H */
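
Taken together, the client-side API above is meant to be used roughly as follows. This is a minimal sketch based only on the declarations in this header: the callback wiring, the fallback to memcpy(), and every "example_*" name are illustrative assumptions, and the busy-wait poll is for brevity only (channels are granted asynchronously after dma_async_client_chan_request()).

#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/string.h>

static struct dma_chan *example_chan;	/* hypothetical: remembered from the event callback */

static void example_event(struct dma_client *client, struct dma_chan *chan,
			  enum dma_event event)
{
	/* channels are granted/revoked asynchronously after chan_request() */
	if (event == DMA_RESOURCE_ADDED)
		example_chan = chan;
	else if (event == DMA_RESOURCE_REMOVED && chan == example_chan)
		example_chan = NULL;
}

static int example_offloaded_copy(void *dst, void *src, size_t len)
{
	struct dma_client *client;
	dma_cookie_t cookie;
	enum dma_status status;

	client = dma_async_client_register(example_event);
	if (!client)
		return -ENOMEM;
	dma_async_client_chan_request(client, 1);

	if (!example_chan) {
		/* no channel granted (yet) - fall back to a CPU copy */
		memcpy(dst, src, len);
		dma_async_client_unregister(client);
		return 0;
	}

	cookie = dma_async_memcpy_buf_to_buf(example_chan, dst, src, len);
	if (dma_submit_error(cookie)) {
		dma_async_client_unregister(client);
		return -EIO;
	}

	/* flush the descriptor to hardware and poll for completion */
	dma_async_memcpy_issue_pending(example_chan);
	do {
		status = dma_async_memcpy_complete(example_chan, cookie,
						   NULL, NULL);
	} while (status == DMA_IN_PROGRESS);

	dma_async_client_unregister(client);
	return status == DMA_SUCCESS ? 0 : -EIO;
}

A caller tracking many outstanding copies would instead pass &last and &used to dma_async_memcpy_complete() once, then test each cookie cheaply with dma_async_is_complete().
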
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index dd7d627bf66f..c115e9e840b4 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -1114,8 +1114,11 @@ static inline struct i2o_message *i2o_msg_get(struct i2o_controller *c)
mmsg->mfa = readl(c->in_port);
if (unlikely(mmsg->mfa >= c->in_queue.len)) {
+ u32 mfa = mmsg->mfa;
+
mempool_free(mmsg, c->in_msg.mempool);
- if(mmsg->mfa == I2O_QUEUE_EMPTY)
+
+ if (mfa == I2O_QUEUE_EMPTY)
return ERR_PTR(-EBUSY);
return ERR_PTR(-EFAULT);
}
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 28f4f3b36950..899c3d4776f3 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -169,7 +169,7 @@ struct ip_sf_list
struct ip_mc_list
{
struct in_device *interface;
- unsigned long multiaddr;
+ __be32 multiaddr;
struct ip_sf_list *sources;
struct ip_sf_list *tomb;
unsigned int sfmode;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index f4169bbb60eb..e432b743dda2 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -37,6 +37,7 @@
#include <linux/config.h>
#include <linux/device.h>
#include <linux/percpu.h>
+#include <linux/dmaengine.h>
struct divert_blk;
struct vlan_group;
@@ -311,6 +312,9 @@ struct net_device
#define NETIF_F_LLTX 4096 /* LockLess TX */
#define NETIF_F_UFO 8192 /* Can offload UDP Large Send*/
+#define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
+#define NETIF_F_ALL_CSUM (NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)
+
struct net_device *next_sched;
/* Interface index. Unique device identifier */
@@ -406,7 +410,7 @@ struct net_device
* One part is mostly used on xmit path (device)
*/
/* hard_start_xmit synchronizer */
- spinlock_t xmit_lock ____cacheline_aligned_in_smp;
+ spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
/* cpu id of processor entered to hard_start_xmit or -1,
if nobody entered there.
*/
@@ -593,6 +597,9 @@ struct softnet_data
struct sk_buff *completion_queue;
struct net_device backlog_dev; /* Sorry. 8) */
+#ifdef CONFIG_NET_DMA
+ struct dma_chan *net_dma;
+#endif
};
DECLARE_PER_CPU(struct softnet_data,softnet_data);
@@ -889,11 +896,43 @@ static inline void __netif_rx_complete(struct net_device *dev)
clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
}
+static inline void netif_tx_lock(struct net_device *dev)
+{
+ spin_lock(&dev->_xmit_lock);
+ dev->xmit_lock_owner = smp_processor_id();
+}
+
+static inline void netif_tx_lock_bh(struct net_device *dev)
+{
+ spin_lock_bh(&dev->_xmit_lock);
+ dev->xmit_lock_owner = smp_processor_id();
+}
+
+static inline int netif_tx_trylock(struct net_device *dev)
+{
+ int err = spin_trylock(&dev->_xmit_lock);
+ if (err)
+ dev->xmit_lock_owner = smp_processor_id();
+ return err;
+}
+
+static inline void netif_tx_unlock(struct net_device *dev)
+{
+ dev->xmit_lock_owner = -1;
+ spin_unlock(&dev->_xmit_lock);
+}
+
+static inline void netif_tx_unlock_bh(struct net_device *dev)
+{
+ dev->xmit_lock_owner = -1;
+ spin_unlock_bh(&dev->_xmit_lock);
+}
+
static inline void netif_tx_disable(struct net_device *dev)
{
- spin_lock_bh(&dev->xmit_lock);
+ netif_tx_lock_bh(dev);
netif_stop_queue(dev);
- spin_unlock_bh(&dev->xmit_lock);
+ netif_tx_unlock_bh(dev);
}
/* These functions live elsewhere (drivers/net/net_init.c, but related) */
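
For driver authors, the practical effect of the xmit_lock -> _xmit_lock rename plus the new helpers is that TX-path serialization is taken and released through the netif_tx_* wrappers instead of by touching the spinlock directly. A hedged sketch of a converted timeout handler follows; example_reset_tx_ring() is a hypothetical driver-private helper, not part of this patch.

static void example_tx_timeout(struct net_device *dev)
{
	/* before this change: spin_lock_bh(&dev->xmit_lock); */
	netif_tx_lock_bh(dev);		/* also records xmit_lock_owner */

	example_reset_tx_ring(dev);	/* hypothetical driver-private helper */
	netif_wake_queue(dev);

	/* before this change: spin_unlock_bh(&dev->xmit_lock); */
	netif_tx_unlock_bh(dev);
}
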
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index 3ff88c878308..d2e4bd7a7a14 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -69,6 +69,10 @@ enum ip_conntrack_status {
/* Connection is dying (removed from lists), can not be unset. */
IPS_DYING_BIT = 9,
IPS_DYING = (1 << IPS_DYING_BIT),
+
+ /* Connection has fixed timeout. */
+ IPS_FIXED_TIMEOUT_BIT = 10,
+ IPS_FIXED_TIMEOUT = (1 << IPS_FIXED_TIMEOUT_BIT),
};
/* Connection tracking event bits */
diff --git a/include/linux/netfilter/nfnetlink_conntrack.h b/include/linux/netfilter/nfnetlink_conntrack.h
index 668ec946c8e2..b5883ccee295 100644
--- a/include/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/linux/netfilter/nfnetlink_conntrack.h
@@ -27,13 +27,15 @@ enum ctattr_type {
CTA_STATUS,
CTA_PROTOINFO,
CTA_HELP,
- CTA_NAT,
+ CTA_NAT_SRC,
+#define CTA_NAT CTA_NAT_SRC /* backwards compatibility */
CTA_TIMEOUT,
CTA_MARK,
CTA_COUNTERS_ORIG,
CTA_COUNTERS_REPLY,
CTA_USE,
CTA_ID,
+ CTA_NAT_DST,
__CTA_MAX
};
#define CTA_MAX (__CTA_MAX - 1)
diff --git a/include/linux/netfilter/xt_CONNSECMARK.h b/include/linux/netfilter/xt_CONNSECMARK.h
new file mode 100644
index 000000000000..c6bd75469ba2
--- /dev/null
+++ b/include/linux/netfilter/xt_CONNSECMARK.h
@@ -0,0 +1,13 @@
+#ifndef _XT_CONNSECMARK_H_target
+#define _XT_CONNSECMARK_H_target
+
+enum {
+ CONNSECMARK_SAVE = 1,
+ CONNSECMARK_RESTORE,
+};
+
+struct xt_connsecmark_target_info {
+ u_int8_t mode;
+};
+
+#endif /*_XT_CONNSECMARK_H_target */
diff --git a/include/linux/netfilter/xt_SECMARK.h b/include/linux/netfilter/xt_SECMARK.h
new file mode 100644
index 000000000000..c53fbffa997d
--- /dev/null
+++ b/include/linux/netfilter/xt_SECMARK.h
@@ -0,0 +1,26 @@
+#ifndef _XT_SECMARK_H_target
+#define _XT_SECMARK_H_target
+
+/*
+ * This is intended for use by various security subsystems (but not
+ * at the same time).
+ *
+ * 'mode' refers to the specific security subsystem which the
+ * packets are being marked for.
+ */
+#define SECMARK_MODE_SEL 0x01 /* SELinux */
+#define SECMARK_SELCTX_MAX 256
+
+struct xt_secmark_target_selinux_info {
+ u_int32_t selsid;
+ char selctx[SECMARK_SELCTX_MAX];
+};
+
+struct xt_secmark_target_info {
+ u_int8_t mode;
+ union {
+ struct xt_secmark_target_selinux_info sel;
+ } u;
+};
+
+#endif /*_XT_SECMARK_H_target */
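
This header only carries the target's configuration; the matching module (net/netfilter/xt_SECMARK.c, not part of this header-only diff) is expected to copy the pre-resolved SID into the new skb->secmark field at packet-processing time. A rough, illustrative sketch, not the real target implementation:

#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_SECMARK.h>

static unsigned int example_secmark_target(struct sk_buff *skb,
					   const struct xt_secmark_target_info *info)
{
	u32 secmark = 0;

	if (info->mode == SECMARK_MODE_SEL)
		secmark = info->u.sel.selsid;	/* SID resolved at rule-insertion time */

	skb->secmark = secmark;		/* field added by the skbuff.h hunk below (CONFIG_NETWORK_SECMARK) */
	return XT_CONTINUE;
}
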
diff --git a/include/linux/netfilter/xt_quota.h b/include/linux/netfilter/xt_quota.h
new file mode 100644
index 000000000000..acd7fd77bbee
--- /dev/null
+++ b/include/linux/netfilter/xt_quota.h
@@ -0,0 +1,16 @@
+#ifndef _XT_QUOTA_H
+#define _XT_QUOTA_H
+
+enum xt_quota_flags {
+ XT_QUOTA_INVERT = 0x1,
+};
+#define XT_QUOTA_MASK 0x1
+
+struct xt_quota_info {
+ u_int32_t flags;
+ u_int32_t pad;
+ aligned_u64 quota;
+ struct xt_quota_info *master;
+};
+
+#endif /* _XT_QUOTA_H */
diff --git a/include/linux/netfilter/xt_statistic.h b/include/linux/netfilter/xt_statistic.h
new file mode 100644
index 000000000000..c344e9916e23
--- /dev/null
+++ b/include/linux/netfilter/xt_statistic.h
@@ -0,0 +1,32 @@
+#ifndef _XT_STATISTIC_H
+#define _XT_STATISTIC_H
+
+enum xt_statistic_mode {
+ XT_STATISTIC_MODE_RANDOM,
+ XT_STATISTIC_MODE_NTH,
+ __XT_STATISTIC_MODE_MAX
+};
+#define XT_STATISTIC_MODE_MAX (__XT_STATISTIC_MODE_MAX - 1)
+
+enum xt_statistic_flags {
+ XT_STATISTIC_INVERT = 0x1,
+};
+#define XT_STATISTIC_MASK 0x1
+
+struct xt_statistic_info {
+ u_int16_t mode;
+ u_int16_t flags;
+ union {
+ struct {
+ u_int32_t probability;
+ } random;
+ struct {
+ u_int32_t every;
+ u_int32_t packet;
+ u_int32_t count;
+ } nth;
+ } u;
+ struct xt_statistic_info *master __attribute__((aligned(8)));
+};
+
+#endif /* _XT_STATISTIC_H */
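
The two modes encode either a random match probability or an every-Nth-packet counter. A hedged sketch of how a match might evaluate the "nth" mode; the real module (net/netfilter/xt_statistic.c) additionally handles the random mode, the master pointer and locking.

static int example_statistic_match(struct xt_statistic_info *info)
{
	int matched = (info->flags & XT_STATISTIC_INVERT) ? 1 : 0;

	if (info->mode == XT_STATISTIC_MODE_NTH) {
		/* match every info->u.nth.every'th packet */
		if (info->u.nth.count++ == info->u.nth.every) {
			info->u.nth.count = 0;
			matched = !matched;
		}
	}
	return matched;
}
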
diff --git a/include/linux/netfilter_ipv4/ip_conntrack.h b/include/linux/netfilter_ipv4/ip_conntrack.h
index d54d7b278e96..e0e9951eb8c3 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack.h
@@ -121,6 +121,10 @@ struct ip_conntrack
u_int32_t mark;
#endif
+#ifdef CONFIG_IP_NF_CONNTRACK_SECMARK
+ u_int32_t secmark;
+#endif
+
/* Traversed often, so hopefully in different cacheline to top */
/* These are my tuples; original and reply */
struct ip_conntrack_tuple_hash tuplehash[IP_CT_DIR_MAX];
@@ -154,6 +158,7 @@ struct ip_conntrack_expect
unsigned int flags;
#ifdef CONFIG_IP_NF_NAT_NEEDED
+ u_int32_t saved_ip;
/* This is the original per-proto part, used to map the
* expected connection the way the recipient expects. */
union ip_conntrack_manip_proto saved_proto;
@@ -293,6 +298,7 @@ static inline int is_dying(struct ip_conntrack *ct)
}
extern unsigned int ip_conntrack_htable_size;
+extern int ip_conntrack_checksum;
#define CONNTRACK_STAT_INC(count) (__get_cpu_var(ip_conntrack_stat).count++)
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_h323.h b/include/linux/netfilter_ipv4/ip_conntrack_h323.h
index eace86bd2adb..3cbff7379002 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack_h323.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack_h323.h
@@ -71,6 +71,13 @@ extern int (*nat_h245_hook) (struct sk_buff ** pskb, struct ip_conntrack * ct,
unsigned char **data, int dataoff,
TransportAddress * addr, u_int16_t port,
struct ip_conntrack_expect * exp);
+extern int (*nat_callforwarding_hook) (struct sk_buff ** pskb,
+ struct ip_conntrack * ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned char **data, int dataoff,
+ TransportAddress * addr,
+ u_int16_t port,
+ struct ip_conntrack_expect * exp);
extern int (*nat_q931_hook) (struct sk_buff ** pskb, struct ip_conntrack * ct,
enum ip_conntrack_info ctinfo,
unsigned char **data, TransportAddress * addr,
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_helper_h323_types.h b/include/linux/netfilter_ipv4/ip_conntrack_helper_h323_types.h
index cc98f7aa5abe..3d4a773799fc 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack_helper_h323_types.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack_helper_h323_types.h
@@ -1,4 +1,4 @@
-/* Generated by Jing Min Zhao's ASN.1 parser, Mar 15 2006
+/* Generated by Jing Min Zhao's ASN.1 parser, Apr 20 2006
*
* Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net>
*
@@ -412,6 +412,7 @@ typedef struct Facility_UUIE { /* SEQUENCE */
eFacility_UUIE_destinationInfo = (1 << 14),
eFacility_UUIE_h245SecurityMode = (1 << 13),
} options;
+ TransportAddress alternativeAddress;
FacilityReason reason;
TransportAddress h245Address;
Facility_UUIE_fastStart fastStart;
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_sip.h b/include/linux/netfilter_ipv4/ip_conntrack_sip.h
new file mode 100644
index 000000000000..913dad66c0fb
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_conntrack_sip.h
@@ -0,0 +1,44 @@
+#ifndef __IP_CONNTRACK_SIP_H__
+#define __IP_CONNTRACK_SIP_H__
+#ifdef __KERNEL__
+
+#define SIP_PORT 5060
+#define SIP_TIMEOUT 3600
+
+#define POS_VIA 0
+#define POS_CONTACT 1
+#define POS_CONTENT 2
+#define POS_MEDIA 3
+#define POS_OWNER 4
+#define POS_CONNECTION 5
+#define POS_REQ_HEADER 6
+#define POS_SDP_HEADER 7
+
+struct sip_header_nfo {
+ const char *lname;
+ const char *sname;
+ const char *ln_str;
+ size_t lnlen;
+ size_t snlen;
+ size_t ln_strlen;
+ int (*match_len)(const char *, const char *, int *);
+};
+
+extern unsigned int (*ip_nat_sip_hook)(struct sk_buff **pskb,
+ enum ip_conntrack_info ctinfo,
+ struct ip_conntrack *ct,
+ const char **dptr);
+extern unsigned int (*ip_nat_sdp_hook)(struct sk_buff **pskb,
+ enum ip_conntrack_info ctinfo,
+ struct ip_conntrack_expect *exp,
+ const char *dptr);
+
+extern int ct_sip_get_info(const char *dptr, size_t dlen,
+ unsigned int *matchoff,
+ unsigned int *matchlen,
+ struct sip_header_nfo *hnfo);
+extern int ct_sip_lnlen(const char *line, const char *limit);
+extern const char *ct_sip_search(const char *needle, const char *haystack,
+ size_t needle_len, size_t haystack_len);
+#endif /* __KERNEL__ */
+#endif /* __IP_CONNTRACK_SIP_H__ */
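
The ip_nat_sip_hook/ip_nat_sdp_hook function pointers are the seam between this conntrack helper and an optional NAT helper module: the NAT side registers itself by assigning the hooks. A hedged sketch of that registration; the function names are illustrative, not code from this patch.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4/ip_conntrack_sip.h>

static unsigned int example_nat_sip(struct sk_buff **pskb,
				    enum ip_conntrack_info ctinfo,
				    struct ip_conntrack *ct,
				    const char **dptr)
{
	/* rewrite addresses embedded in the SIP payload here */
	return NF_ACCEPT;
}

static int __init example_nat_sip_init(void)
{
	BUG_ON(ip_nat_sip_hook != NULL);	/* only one NAT helper may register */
	ip_nat_sip_hook = example_nat_sip;
	return 0;
}
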
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 4877e35ae202..936ef82ed76a 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -50,7 +50,7 @@
extern acpi_status pci_osc_control_set(acpi_handle handle, u32 flags);
extern acpi_status pci_osc_support_set(u32 flags);
#else
-#if !defined(acpi_status)
+#if !defined(AE_ERROR)
typedef u32 acpi_status;
#define AE_ERROR (acpi_status) (0x0001)
#endif
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 3a6a4e37a482..6fd36cb09160 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -442,6 +442,7 @@ struct pci_dev *pci_find_device_reverse (unsigned int vendor, unsigned int devic
struct pci_dev *pci_find_slot (unsigned int bus, unsigned int devfn);
int pci_find_capability (struct pci_dev *dev, int cap);
int pci_find_next_capability (struct pci_dev *dev, u8 pos, int cap);
+int pci_find_ext_capability (struct pci_dev *dev, int cap);
struct pci_bus * pci_find_next_bus(const struct pci_bus *from);
struct pci_dev *pci_get_device (unsigned int vendor, unsigned int device, struct pci_dev *from);
@@ -662,6 +663,7 @@ static inline int pci_register_driver(struct pci_driver *drv) { return 0;}
static inline void pci_unregister_driver(struct pci_driver *drv) { }
static inline int pci_find_capability (struct pci_dev *dev, int cap) {return 0; }
static inline int pci_find_next_capability (struct pci_dev *dev, u8 post, int cap) { return 0; }
+static inline int pci_find_ext_capability (struct pci_dev *dev, int cap) {return 0; }
static inline const struct pci_device_id *pci_match_device(const struct pci_device_id *ids, const struct pci_dev *dev) { return NULL; }
/* Power management related routines */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 590dc6dca315..bcfe9d4f56ae 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -935,6 +935,7 @@
#define PCI_DEVICE_ID_PLX_DJINN_ITOO 0x1151
#define PCI_DEVICE_ID_PLX_R753 0x1152
#define PCI_DEVICE_ID_PLX_OLITEC 0x1187
+#define PCI_DEVICE_ID_PLX_PCI200SYN 0x3196
#define PCI_DEVICE_ID_PLX_9050 0x9050
#define PCI_DEVICE_ID_PLX_9080 0x9080
#define PCI_DEVICE_ID_PLX_GTEK_SERIAL2 0xa001
@@ -1182,6 +1183,14 @@
#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1100 0x034E
#define PCI_DEVICE_ID_NVIDIA_NVENET_14 0x0372
#define PCI_DEVICE_ID_NVIDIA_NVENET_15 0x0373
+#define PCI_DEVICE_ID_NVIDIA_NVENET_16 0x03E5
+#define PCI_DEVICE_ID_NVIDIA_NVENET_17 0x03E6
+#define PCI_DEVICE_ID_NVIDIA_NVENET_18 0x03EE
+#define PCI_DEVICE_ID_NVIDIA_NVENET_19 0x03EF
+#define PCI_DEVICE_ID_NVIDIA_NVENET_20 0x0450
+#define PCI_DEVICE_ID_NVIDIA_NVENET_21 0x0451
+#define PCI_DEVICE_ID_NVIDIA_NVENET_22 0x0452
+#define PCI_DEVICE_ID_NVIDIA_NVENET_23 0x0453
#define PCI_VENDOR_ID_IMS 0x10e0
#define PCI_DEVICE_ID_IMS_TT128 0x9128
@@ -1827,6 +1836,7 @@
#define PCI_VENDOR_ID_SAMSUNG 0x144d
+#define PCI_VENDOR_ID_MYRICOM 0x14c1
#define PCI_VENDOR_ID_TITAN 0x14D2
#define PCI_DEVICE_ID_TITAN_010L 0x8001
@@ -1887,6 +1897,7 @@
#define PCI_DEVICE_ID_TIGON3_5751F 0x167e
#define PCI_DEVICE_ID_TIGON3_5787M 0x1693
#define PCI_DEVICE_ID_TIGON3_5782 0x1696
+#define PCI_DEVICE_ID_TIGON3_5786 0x169a
#define PCI_DEVICE_ID_TIGON3_5787 0x169b
#define PCI_DEVICE_ID_TIGON3_5788 0x169c
#define PCI_DEVICE_ID_TIGON3_5789 0x169d
@@ -2043,6 +2054,7 @@
#define PCI_DEVICE_ID_INTEL_80960_RP 0x1960
#define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21
#define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30
+#define PCI_DEVICE_ID_INTEL_IOAT 0x1a38
#define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410
#define PCI_DEVICE_ID_INTEL_82801AA_1 0x2411
#define PCI_DEVICE_ID_INTEL_82801AA_3 0x2413
diff --git a/include/linux/pfkeyv2.h b/include/linux/pfkeyv2.h
index bac0fb389cf1..d5dd471da225 100644
--- a/include/linux/pfkeyv2.h
+++ b/include/linux/pfkeyv2.h
@@ -159,7 +159,7 @@ struct sadb_spirange {
struct sadb_x_kmprivate {
uint16_t sadb_x_kmprivate_len;
uint16_t sadb_x_kmprivate_exttype;
- u_int32_t sadb_x_kmprivate_reserved;
+ uint32_t sadb_x_kmprivate_reserved;
} __attribute__((packed));
/* sizeof(struct sadb_x_kmprivate) == 8 */
diff --git a/include/linux/security.h b/include/linux/security.h
index 1bab48f6aeac..4dfb1b84a9b3 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -805,31 +805,37 @@ struct swap_info_struct;
* used by the XFRM system.
* @sec_ctx contains the security context information being provided by
* the user-level policy update program (e.g., setkey).
- * Allocate a security structure to the xp->selector.security field.
+ * Allocate a security structure to the xp->security field.
* The security field is initialized to NULL when the xfrm_policy is
* allocated.
* Return 0 if operation was successful (memory to allocate, legal context)
* @xfrm_policy_clone_security:
* @old contains an existing xfrm_policy in the SPD.
* @new contains a new xfrm_policy being cloned from old.
- * Allocate a security structure to the new->selector.security field
- * that contains the information from the old->selector.security field.
+ * Allocate a security structure to the new->security field
+ * that contains the information from the old->security field.
* Return 0 if operation was successful (memory to allocate).
* @xfrm_policy_free_security:
* @xp contains the xfrm_policy
- * Deallocate xp->selector.security.
+ * Deallocate xp->security.
+ * @xfrm_policy_delete_security:
+ * @xp contains the xfrm_policy.
+ * Authorize deletion of xp->security.
* @xfrm_state_alloc_security:
* @x contains the xfrm_state being added to the Security Association
* Database by the XFRM system.
* @sec_ctx contains the security context information being provided by
* the user-level SA generation program (e.g., setkey or racoon).
- * Allocate a security structure to the x->sel.security field. The
+ * Allocate a security structure to the x->security field. The
* security field is initialized to NULL when the xfrm_state is
* allocated.
* Return 0 if operation was successful (memory to allocate, legal context).
* @xfrm_state_free_security:
* @x contains the xfrm_state.
- * Deallocate x>sel.security.
+ * Deallocate x->security.
+ * @xfrm_state_delete_security:
+ * @x contains the xfrm_state.
+ * Authorize deletion of x->security.
* @xfrm_policy_lookup:
* @xp contains the xfrm_policy for which the access control is being
* checked.
@@ -1298,8 +1304,10 @@ struct security_operations {
int (*xfrm_policy_alloc_security) (struct xfrm_policy *xp, struct xfrm_user_sec_ctx *sec_ctx);
int (*xfrm_policy_clone_security) (struct xfrm_policy *old, struct xfrm_policy *new);
void (*xfrm_policy_free_security) (struct xfrm_policy *xp);
+ int (*xfrm_policy_delete_security) (struct xfrm_policy *xp);
int (*xfrm_state_alloc_security) (struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx);
void (*xfrm_state_free_security) (struct xfrm_state *x);
+ int (*xfrm_state_delete_security) (struct xfrm_state *x);
int (*xfrm_policy_lookup)(struct xfrm_policy *xp, u32 sk_sid, u8 dir);
#endif /* CONFIG_SECURITY_NETWORK_XFRM */
@@ -2934,11 +2942,21 @@ static inline void security_xfrm_policy_free(struct xfrm_policy *xp)
security_ops->xfrm_policy_free_security(xp);
}
+static inline int security_xfrm_policy_delete(struct xfrm_policy *xp)
+{
+ return security_ops->xfrm_policy_delete_security(xp);
+}
+
static inline int security_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx)
{
return security_ops->xfrm_state_alloc_security(x, sec_ctx);
}
+static inline int security_xfrm_state_delete(struct xfrm_state *x)
+{
+ return security_ops->xfrm_state_delete_security(x);
+}
+
static inline void security_xfrm_state_free(struct xfrm_state *x)
{
security_ops->xfrm_state_free_security(x);
@@ -2963,6 +2981,11 @@ static inline void security_xfrm_policy_free(struct xfrm_policy *xp)
{
}
+static inline int security_xfrm_policy_delete(struct xfrm_policy *xp)
+{
+ return 0;
+}
+
static inline int security_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx)
{
return 0;
@@ -2972,6 +2995,11 @@ static inline void security_xfrm_state_free(struct xfrm_state *x)
{
}
+static inline int security_xfrm_state_delete(struct xfrm_state *x)
+{
+ return 0;
+}
+
static inline int security_xfrm_policy_lookup(struct xfrm_policy *xp, u32 sk_sid, u8 dir)
{
return 0;
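
The new *_delete hooks let an LSM veto removal of a labelled SA or policy before the XFRM core tears it down; the caller is expected to check the result and abort the delete on error. A hedged sketch of the intended call pattern (the function name is illustrative):

static int example_delete_state(struct xfrm_state *x)
{
	int err;

	err = security_xfrm_state_delete(x);	/* LSM may refuse, e.g. with -EPERM */
	if (err)
		return err;

	/* ... proceed with the actual __xfrm_state_delete(x) work ... */
	return 0;
}
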
diff --git a/include/linux/selinux.h b/include/linux/selinux.h
index 4047bcde4484..aad4e390d6a5 100644
--- a/include/linux/selinux.h
+++ b/include/linux/selinux.h
@@ -118,6 +118,27 @@ void selinux_get_ipc_sid(const struct kern_ipc_perm *ipcp, u32 *sid);
*/
void selinux_get_task_sid(struct task_struct *tsk, u32 *sid);
+/**
+ * selinux_string_to_sid - map a security context string to a security ID
+ * @str: the security context string to be mapped
+ * @sid: ID value returned via this.
+ *
+ * Returns 0 if successful, with the SID stored in sid. A value
+ * of zero for sid indicates no SID could be determined (but no error
+ * occurred).
+ */
+int selinux_string_to_sid(char *str, u32 *sid);
+
+/**
+ * selinux_relabel_packet_permission - check permission to relabel a packet
+ * @sid: ID value to be applied to network packet (via SECMARK, most likely)
+ *
+ * Returns 0 if the current task is allowed to label packets with the
+ * supplied security ID. Note that it is implicit that the packet is always
+ * being relabeled from the default unlabeled value, and that the access
+ * control decision is made in the AVC.
+ */
+int selinux_relabel_packet_permission(u32 sid);
#else
@@ -172,6 +193,17 @@ static inline void selinux_get_task_sid(struct task_struct *tsk, u32 *sid)
*sid = 0;
}
+static inline int selinux_string_to_sid(const char *str, u32 *sid)
+{
+ *sid = 0;
+ return 0;
+}
+
+static inline int selinux_relabel_packet_permission(u32 sid)
+{
+ return 0;
+}
+
#endif /* CONFIG_SECURITY_SELINUX */
#endif /* _LINUX_SELINUX_H */
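
These two helpers are aimed at the SECMARK machinery: rule-insertion code maps the user-supplied context string to a SID and then asks whether the current task may label packets with it. A hedged sketch of such a validation step; the function name is illustrative.

static int example_secmark_checkentry(struct xt_secmark_target_selinux_info *sel)
{
	int err;

	err = selinux_string_to_sid(sel->selctx, &sel->selsid);
	if (err)
		return err;		/* mapping failed outright */
	if (!sel->selsid)
		return -ENOENT;		/* context string did not resolve to a SID */

	return selinux_relabel_packet_permission(sel->selsid);
}
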
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f8f234708b98..93e4db221585 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -29,6 +29,7 @@
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
+#include <linux/dmaengine.h>
#define HAVE_ALLOC_SKB /* For the drivers to know */
#define HAVE_ALIGNABLE_SKB /* Ditto 8) */
@@ -209,6 +210,7 @@ enum {
* @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
* @tc_index: Traffic control index
* @tc_verd: traffic control verdict
+ * @secmark: security marking
*/
struct sk_buff {
@@ -285,6 +287,12 @@ struct sk_buff {
__u16 tc_verd; /* traffic control verdict */
#endif
#endif
+#ifdef CONFIG_NET_DMA
+ dma_cookie_t dma_cookie;
+#endif
+#ifdef CONFIG_NETWORK_SECMARK
+ __u32 secmark;
+#endif
/* These elements must be at the end, see alloc_skb() for details. */
@@ -967,15 +975,16 @@ static inline void skb_reserve(struct sk_buff *skb, int len)
#define NET_SKB_PAD 16
#endif
-extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc);
+extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
- if (!skb->data_len) {
- skb->len = len;
- skb->tail = skb->data + len;
- } else
- ___pskb_trim(skb, len, 0);
+ if (unlikely(skb->data_len)) {
+ WARN_ON(1);
+ return;
+ }
+ skb->len = len;
+ skb->tail = skb->data + len;
}
/**
@@ -985,6 +994,7 @@ static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
*
* Cut the length of a buffer down by removing data from the tail. If
* the buffer is already under the length specified it is not modified.
+ * The skb must be linear.
*/
static inline void skb_trim(struct sk_buff *skb, unsigned int len)
{
@@ -995,12 +1005,10 @@ static inline void skb_trim(struct sk_buff *skb, unsigned int len)
static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
- if (!skb->data_len) {
- skb->len = len;
- skb->tail = skb->data+len;
- return 0;
- }
- return ___pskb_trim(skb, len, 1);
+ if (skb->data_len)
+ return ___pskb_trim(skb, len);
+ __skb_trim(skb, len);
+ return 0;
}
static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
@@ -1161,18 +1169,34 @@ static inline int skb_can_coalesce(struct sk_buff *skb, int i,
return 0;
}
+static inline int __skb_linearize(struct sk_buff *skb)
+{
+ return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
+}
+
/**
* skb_linearize - convert paged skb to linear one
 * @skb: buffer to linearize
- * @gfp: allocation mode
*
* If there is no free memory -ENOMEM is returned, otherwise zero
* is returned and the old skb data released.
*/
-extern int __skb_linearize(struct sk_buff *skb, gfp_t gfp);
-static inline int skb_linearize(struct sk_buff *skb, gfp_t gfp)
+static inline int skb_linearize(struct sk_buff *skb)
+{
+ return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
+}
+
+/**
+ * skb_linearize_cow - make sure skb is linear and writable
+ * @skb: buffer to process
+ *
+ * If there is no free memory -ENOMEM is returned, otherwise zero
+ * is returned and the old skb data released.
+ */
+static inline int skb_linearize_cow(struct sk_buff *skb)
{
- return __skb_linearize(skb, gfp);
+ return skb_is_nonlinear(skb) || skb_cloned(skb) ?
+ __skb_linearize(skb) : 0;
}
/**
@@ -1396,5 +1420,23 @@ static inline void nf_reset(struct sk_buff *skb)
static inline void nf_reset(struct sk_buff *skb) {}
#endif /* CONFIG_NETFILTER */
+#ifdef CONFIG_NETWORK_SECMARK
+static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
+{
+ to->secmark = from->secmark;
+}
+
+static inline void skb_init_secmark(struct sk_buff *skb)
+{
+ skb->secmark = 0;
+}
+#else
+static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
+{ }
+
+static inline void skb_init_secmark(struct sk_buff *skb)
+{ }
+#endif
+
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */
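
For callers, the visible change is that skb_linearize() no longer takes a gfp_t argument and the new skb_linearize_cow() additionally guarantees the data is writable. A hedged before/after sketch of a typical packet-mangling path (the function name is illustrative):

static int example_mangle_packet(struct sk_buff *skb)
{
	/* before this change: if (skb_linearize(skb, GFP_ATOMIC)) return -ENOMEM; */
	if (skb_linearize_cow(skb))
		return -ENOMEM;		/* now linear *and* safe to modify */

	/* skb->data .. skb->data + skb->len - 1 can be rewritten here */
	return 0;
}
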
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 76eaeff76f82..cee944dbdcd4 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -313,6 +313,7 @@ enum
NET_NF_CONNTRACK_FRAG6_TIMEOUT=29,
NET_NF_CONNTRACK_FRAG6_LOW_THRESH=30,
NET_NF_CONNTRACK_FRAG6_HIGH_THRESH=31,
+ NET_NF_CONNTRACK_CHECKSUM=32,
};
/* /proc/sys/net/ipv4 */
@@ -403,6 +404,8 @@ enum
NET_TCP_MTU_PROBING=113,
NET_TCP_BASE_MSS=114,
NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS=115,
+ NET_TCP_DMA_COPYBREAK=116,
+ NET_TCP_SLOW_START_AFTER_IDLE=117,
};
enum {
@@ -491,6 +494,7 @@ enum
NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD=25,
NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT=26,
NET_IPV4_NF_CONNTRACK_COUNT=27,
+ NET_IPV4_NF_CONNTRACK_CHECKSUM=28,
};
/* /proc/sys/net/ipv6 */
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 542d39596bd8..c90daa5da6c3 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -18,6 +18,7 @@
#define _LINUX_TCP_H
#include <linux/types.h>
+#include <linux/dmaengine.h>
#include <asm/byteorder.h>
struct tcphdr {
@@ -233,6 +234,13 @@ struct tcp_sock {
struct iovec *iov;
int memory;
int len;
+#ifdef CONFIG_NET_DMA
+ /* members for async copy */
+ struct dma_chan *dma_chan;
+ int wakeup;
+ struct dma_pinned_list *pinned_list;
+ dma_cookie_t dma_cookie;
+#endif
} ucopy;
__u32 snd_wl1; /* Sequence for window update */
diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
index 6b42cc474c01..46a15c7a1a13 100644
--- a/include/linux/xfrm.h
+++ b/include/linux/xfrm.h
@@ -118,6 +118,10 @@ enum
XFRM_SHARE_UNIQUE /* Use once */
};
+#define XFRM_MODE_TRANSPORT 0
+#define XFRM_MODE_TUNNEL 1
+#define XFRM_MODE_MAX 2
+
/* Netlink configuration messages. */
enum {
XFRM_MSG_BASE = 0x10,