143 files changed, 1655 insertions, 938 deletions
diff --git a/Documentation/Changes b/Documentation/Changes index cb2b141b1c3e..b95082be4d5e 100644 --- a/Documentation/Changes +++ b/Documentation/Changes @@ -33,10 +33,12 @@ o Gnu make 3.79.1 # make --version o binutils 2.12 # ld -v o util-linux 2.10o # fdformat --version o module-init-tools 0.9.10 # depmod -V -o e2fsprogs 1.29 # tune2fs +o e2fsprogs 1.41.4 # e2fsck -V o jfsutils 1.1.3 # fsck.jfs -V o reiserfsprogs 3.6.3 # reiserfsck -V 2>&1|grep reiserfsprogs o xfsprogs 2.6.0 # xfs_db -V +o squashfs-tools 4.0 # mksquashfs -version +o btrfs-progs 0.18 # btrfsck o pcmciautils 004 # pccardctl -V o quota-tools 3.09 # quota -V o PPP 2.4.0 # pppd --version diff --git a/Documentation/CodingStyle b/Documentation/CodingStyle index 1875e502f872..72968cd5eaf3 100644 --- a/Documentation/CodingStyle +++ b/Documentation/CodingStyle @@ -483,17 +483,25 @@ values. To do the latter, you can stick the following in your .emacs file: (* (max steps 1) c-basic-offset))) +(add-hook 'c-mode-common-hook + (lambda () + ;; Add kernel style + (c-add-style + "linux-tabs-only" + '("linux" (c-offsets-alist + (arglist-cont-nonempty + c-lineup-gcc-asm-reg + c-lineup-arglist-tabs-only)))))) + (add-hook 'c-mode-hook (lambda () (let ((filename (buffer-file-name))) ;; Enable kernel mode for the appropriate files (when (and filename - (string-match "~/src/linux-trees" filename)) + (string-match (expand-file-name "~/src/linux-trees") + filename)) (setq indent-tabs-mode t) - (c-set-style "linux") - (c-set-offset 'arglist-cont-nonempty - '(c-lineup-gcc-asm-reg - c-lineup-arglist-tabs-only)))))) + (c-set-style "linux-tabs-only"))))) This will make emacs go better with the kernel coding style for C files below ~/src/linux-trees. diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt index 52441694fe03..2a3fcc55e981 100644 --- a/Documentation/DMA-API.txt +++ b/Documentation/DMA-API.txt @@ -5,7 +5,7 @@ This document describes the DMA API. For a more gentle introduction phrased in terms of the pci_ equivalents (and actual examples) see -DMA-mapping.txt +Documentation/PCI/PCI-DMA-mapping.txt. This API is split into two pieces. Part I describes the API and the corresponding pci_ API. Part II describes the extensions to the API diff --git a/Documentation/DocBook/uio-howto.tmpl b/Documentation/DocBook/uio-howto.tmpl index b787e4721c90..52e1b79ce0e6 100644 --- a/Documentation/DocBook/uio-howto.tmpl +++ b/Documentation/DocBook/uio-howto.tmpl @@ -42,6 +42,12 @@ GPL version 2. <revhistory> <revision> + <revnumber>0.7</revnumber> + <date>2008-12-23</date> + <authorinitials>hjk</authorinitials> + <revremark>Added generic platform drivers and offset attribute.</revremark> + </revision> + <revision> <revnumber>0.6</revnumber> <date>2008-12-05</date> <authorinitials>hjk</authorinitials> @@ -312,6 +318,16 @@ interested in translating it, please email me pointed to by addr. </para> </listitem> +<listitem> + <para> + <filename>offset</filename>: The offset, in bytes, that has to be + added to the pointer returned by <function>mmap()</function> to get + to the actual device memory. This is important if the device's memory + is not page aligned. Remember that pointers returned by + <function>mmap()</function> are always page aligned, so it is good + style to always add this offset. + </para> +</listitem> </itemizedlist> <para> @@ -594,6 +610,78 @@ framework to set up sysfs files for this region. Simply leave it alone. 
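The offset attribute documented above is meant to be added to the pointer mmap() returns. A minimal userspace sketch, assuming the usual /dev/uio0 and /sys/class/uio/uio0/maps/map0/offset paths and mapping memory region 0 with a one-page length for brevity (a real program would read maps/map0/size as well):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            unsigned long offset = 0;
            volatile unsigned char *regs;
            void *base;
            FILE *f;
            int fd;

            f = fopen("/sys/class/uio/uio0/maps/map0/offset", "r");
            if (!f || fscanf(f, "%lx", &offset) != 1)
                    return 1;
            fclose(f);

            fd = open("/dev/uio0", O_RDWR);
            if (fd < 0)
                    return 1;

            /* mmap() returns a page aligned pointer; memory region N is
             * selected by passing N * page size as the mmap offset
             * (0 selects map0 here). */
            base = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ | PROT_WRITE,
                        MAP_SHARED, fd, 0);
            if (base == MAP_FAILED)
                    return 1;

            /* Only now add the offset attribute to reach the actual,
             * possibly not page aligned, device registers. */
            regs = (volatile unsigned char *)base + offset;
            (void)regs;

            return 0;
    }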
</para> </sect1> +<sect1 id="using_uio_pdrv"> +<title>Using uio_pdrv for platform devices</title> + <para> + In many cases, UIO drivers for platform devices can be handled in a + generic way. In the same place where you define your + <varname>struct platform_device</varname>, you simply also implement + your interrupt handler and fill your + <varname>struct uio_info</varname>. A pointer to this + <varname>struct uio_info</varname> is then used as + <varname>platform_data</varname> for your platform device. + </para> + <para> + You also need to set up an array of <varname>struct resource</varname> + containing addresses and sizes of your memory mappings. This + information is passed to the driver using the + <varname>.resource</varname> and <varname>.num_resources</varname> + elements of <varname>struct platform_device</varname>. + </para> + <para> + You now have to set the <varname>.name</varname> element of + <varname>struct platform_device</varname> to + <varname>"uio_pdrv"</varname> to use the generic UIO platform device + driver. This driver will fill the <varname>mem[]</varname> array + according to the resources given, and register the device. + </para> + <para> + The advantage of this approach is that you only have to edit a file + you need to edit anyway. You do not have to create an extra driver. + </para> +</sect1> + +<sect1 id="using_uio_pdrv_genirq"> +<title>Using uio_pdrv_genirq for platform devices</title> + <para> + Especially in embedded devices, you frequently find chips where the + irq pin is tied to its own dedicated interrupt line. In such cases, + where you can be really sure the interrupt is not shared, we can take + the concept of <varname>uio_pdrv</varname> one step further and use a + generic interrupt handler. That's what + <varname>uio_pdrv_genirq</varname> does. + </para> + <para> + The setup for this driver is the same as described above for + <varname>uio_pdrv</varname>, except that you do not implement an + interrupt handler. The <varname>.handler</varname> element of + <varname>struct uio_info</varname> must remain + <varname>NULL</varname>. The <varname>.irq_flags</varname> element + must not contain <varname>IRQF_SHARED</varname>. + </para> + <para> + You will set the <varname>.name</varname> element of + <varname>struct platform_device</varname> to + <varname>"uio_pdrv_genirq"</varname> to use this driver. + </para> + <para> + The generic interrupt handler of <varname>uio_pdrv_genirq</varname> + will simply disable the interrupt line using + <function>disable_irq_nosync()</function>. After doing its work, + userspace can reenable the interrupt by writing 0x00000001 to the UIO + device file. The driver already implements an + <function>irq_control()</function> to make this possible, you must not + implement your own. + </para> + <para> + Using <varname>uio_pdrv_genirq</varname> not only saves a few lines of + interrupt handler code. You also do not need to know anything about + the chip's internal registers to create the kernel part of the driver. + All you need to know is the irq number of the pin the chip is + connected to. 
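Putting the two new sections together, the kernel-side setup for the generic drivers might look roughly like the sketch below; the chip name, register address and interrupt number are made up for illustration:

    #include <linux/kernel.h>
    #include <linux/platform_device.h>
    #include <linux/uio_driver.h>

    /* Hypothetical register block and dedicated interrupt line. */
    static struct resource mychip_resources[] = {
            {
                    .name  = "mychip_regs",
                    .start = 0x10001000,
                    .end   = 0x10001fff,
                    .flags = IORESOURCE_MEM,
            },
    };

    static struct uio_info mychip_uio_info = {
            .name      = "mychip",
            .version   = "0",
            .irq       = 42,        /* non-shared line */
            .irq_flags = 0,         /* must not contain IRQF_SHARED */
            /* .handler stays NULL: uio_pdrv_genirq supplies the handler */
    };

    static struct platform_device mychip_uio_device = {
            .name          = "uio_pdrv_genirq",
            .id            = -1,
            .dev           = { .platform_data = &mychip_uio_info },
            .resource      = mychip_resources,
            .num_resources = ARRAY_SIZE(mychip_resources),
    };

    /* board setup code would then call
     * platform_device_register(&mychip_uio_device); */

For uio_pdrv the only differences are the device name and that an interrupt handler has to be filled into the uio_info. After an interrupt, userspace re-enables the line by writing the 32-bit value 1 to the UIO device file, as described above.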
+ </para> +</sect1> + </chapter> <chapter id="userspace_driver" xreflabel="Writing a driver in user space"> diff --git a/Documentation/IO-mapping.txt b/Documentation/IO-mapping.txt index 86edb61bdee6..78a440695e11 100644 --- a/Documentation/IO-mapping.txt +++ b/Documentation/IO-mapping.txt @@ -1,6 +1,6 @@ [ NOTE: The virt_to_bus() and bus_to_virt() functions have been - superseded by the functionality provided by the PCI DMA - interface (see Documentation/DMA-mapping.txt). They continue + superseded by the functionality provided by the PCI DMA interface + (see Documentation/PCI/PCI-DMA-mapping.txt). They continue to be documented below for historical purposes, but new code must not use them. --davidm 00/12/12 ] diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt index 3c5434c83daf..ecad6ee75705 100644 --- a/Documentation/block/biodoc.txt +++ b/Documentation/block/biodoc.txt @@ -186,8 +186,9 @@ a virtual address mapping (unlike the earlier scheme of virtual address do not have a corresponding kernel virtual address space mapping) and low-memory pages. -Note: Please refer to DMA-mapping.txt for a discussion on PCI high mem DMA -aspects and mapping of scatter gather lists, and support for 64 bit PCI. +Note: Please refer to Documentation/PCI/PCI-DMA-mapping.txt for a discussion +on PCI high mem DMA aspects and mapping of scatter gather lists, and support +for 64 bit PCI. Special handling is required only for cases where i/o needs to happen on pages at physical memory addresses beyond what the device can support. In these @@ -953,14 +954,14 @@ elevator_allow_merge_fn called whenever the block layer determines results in some sort of conflict internally, this hook allows it to do that. -elevator_dispatch_fn fills the dispatch queue with ready requests. +elevator_dispatch_fn* fills the dispatch queue with ready requests. I/O schedulers are free to postpone requests by not filling the dispatch queue unless @force is non-zero. Once dispatched, I/O schedulers are not allowed to manipulate the requests - they belong to generic dispatch queue. -elevator_add_req_fn called to add a new request into the scheduler +elevator_add_req_fn* called to add a new request into the scheduler elevator_queue_empty_fn returns true if the merge queue is empty. Drivers shouldn't use this, but rather check @@ -990,7 +991,7 @@ elevator_activate_req_fn Called when device driver first sees a request. elevator_deactivate_req_fn Called when device driver decides to delay a request by requeueing it. -elevator_init_fn +elevator_init_fn* elevator_exit_fn Allocate and free any elevator specific storage for a queue. diff --git a/Documentation/cgroups/memcg_test.txt b/Documentation/cgroups/memcg_test.txt index 19533f93b7a2..523a9c16c400 100644 --- a/Documentation/cgroups/memcg_test.txt +++ b/Documentation/cgroups/memcg_test.txt @@ -1,6 +1,6 @@ Memory Resource Controller(Memcg) Implementation Memo. -Last Updated: 2008/12/15 -Base Kernel Version: based on 2.6.28-rc8-mm. +Last Updated: 2009/1/19 +Base Kernel Version: based on 2.6.29-rc2. Because VM is getting complex (one of reasons is memcg...), memcg's behavior is complex. This is a document for memcg's internal behavior. @@ -340,3 +340,23 @@ Under below explanation, we assume CONFIG_MEM_RES_CTRL_SWAP=y. # mount -t cgroup none /cgroup -t cpuset,memory,cpu,devices and do task move, mkdir, rmdir etc...under this. + + 9.7 swapoff. 
+ Besides management of swap is one of complicated parts of memcg, + call path of swap-in at swapoff is not same as usual swap-in path.. + It's worth to be tested explicitly. + + For example, test like following is good. + (Shell-A) + # mount -t cgroup none /cgroup -t memory + # mkdir /cgroup/test + # echo 40M > /cgroup/test/memory.limit_in_bytes + # echo 0 > /cgroup/test/tasks + Run malloc(100M) program under this. You'll see 60M of swaps. + (Shell-B) + # move all tasks in /cgroup/test to /cgroup + # /sbin/swapoff -a + # rmdir /test/cgroup + # kill malloc task. + + Of course, tmpfs v.s. swapoff test should be tested, too. diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index bbebc3a43ac0..a87be42f8211 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt @@ -2027,6 +2027,34 @@ increase the likelihood of this process being killed by the oom-killer. Valid values are in the range -16 to +15, plus the special value -17, which disables oom-killing altogether for this process. +The process to be killed in an out-of-memory situation is selected among all others +based on its badness score. This value equals the original memory size of the process +and is then updated according to its CPU time (utime + stime) and the +run time (uptime - start time). The longer it runs the smaller is the score. +Badness score is divided by the square root of the CPU time and then by +the double square root of the run time. + +Swapped out tasks are killed first. Half of each child's memory size is added to +the parent's score if they do not share the same memory. Thus forking servers +are the prime candidates to be killed. Having only one 'hungry' child will make +parent less preferable than the child. + +/proc/<pid>/oom_score shows process' current badness score. + +The following heuristics are then applied: + * if the task was reniced, its score doubles + * superuser or direct hardware access tasks (CAP_SYS_ADMIN, CAP_SYS_RESOURCE + or CAP_SYS_RAWIO) have their score divided by 4 + * if oom condition happened in one cpuset and checked task does not belong + to it, its score is divided by 8 + * the resulting score is multiplied by two to the power of oom_adj, i.e. + points <<= oom_adj when it is positive and + points >>= -(oom_adj) otherwise + +The task with the highest badness score is then selected and its children +are killed, process itself will be killed in an OOM situation when it does +not have children or some of them disabled oom like described above. + 2.13 /proc/<pid>/oom_score - Display current oom-killer score ------------------------------------------------------------- diff --git a/Documentation/ja_JP/stable_kernel_rules.txt b/Documentation/ja_JP/stable_kernel_rules.txt index b3ffe870de33..14265837c4ce 100644 --- a/Documentation/ja_JP/stable_kernel_rules.txt +++ b/Documentation/ja_JP/stable_kernel_rules.txt @@ -12,11 +12,11 @@ file at first. 
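The oom_adj step spelled out in the proc.txt hunk above is just a power-of-two shift of the badness score. A simplified illustration (an assumption-laden sketch, not the kernel's badness() function; the CPU-time and run-time divisors and the special -17 "disable" value are left out):

    static unsigned long apply_oom_adj(unsigned long points, int oom_adj)
    {
            if (oom_adj > 0) {
                    if (!points)
                            points = 1;     /* keep a boosted task selectable */
                    points <<= oom_adj;     /* score * 2^oom_adj */
            } else if (oom_adj < 0) {
                    points >>= -oom_adj;    /* score / 2^(-oom_adj) */
            }
            return points;
    }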
================================== これは、 -linux-2.6.24/Documentation/stable_kernel_rules.txt +linux-2.6.29/Documentation/stable_kernel_rules.txt の和訳です。 翻訳団体: JF プロジェクト < http://www.linux.or.jp/JF/ > -翻訳日: 2007/12/30 +翻訳日: 2009/1/14 翻訳者: Tsugikazu Shibata <tshibata at ab dot jp dot nec dot com> 校正者: 武井伸光さん、<takei at webmasters dot gr dot jp> かねこさん (Seiji Kaneko) <skaneko at a2 dot mbn dot or dot jp> @@ -38,12 +38,15 @@ linux-2.6.24/Documentation/stable_kernel_rules.txt - ビルドエラー(CONFIG_BROKENになっているものを除く), oops, ハング、デー タ破壊、現実のセキュリティ問題、その他 "ああ、これはダメだね"という ようなものを修正しなければならない。短く言えば、重大な問題。 + - 新しい device ID とクオークも受け入れられる。 - どのように競合状態が発生するかの説明も一緒に書かれていない限り、 "理論的には競合状態になる"ようなものは不可。 - いかなる些細な修正も含めることはできない。(スペルの修正、空白のクリー ンアップなど) - - 対応するサブシステムメンテナが受け入れたものでなければならない。 - Documentation/SubmittingPatches の規則に従ったものでなければならない。 + - パッチ自体か同等の修正が Linus のツリーに既に存在しなければならない。 + Linus のツリーでのコミットID を -stable へのパッチ投稿の際に引用す + ること。 -stable ツリーにパッチを送付する手続き- @@ -52,8 +55,10 @@ linux-2.6.24/Documentation/stable_kernel_rules.txt - 送信者はパッチがキューに受け付けられた際には ACK を、却下された場合 には NAK を受け取る。この反応は開発者たちのスケジュールによって、数 日かかる場合がある。 - - もし受け取られたら、パッチは他の開発者たちのレビューのために - -stable キューに追加される。 + - もし受け取られたら、パッチは他の開発者たちと関連するサブシステムの + メンテナーによるレビューのために -stable キューに追加される。 + - パッチに stable@kernel.org のアドレスが付加されているときには、それ + が Linus のツリーに入る時に自動的に stable チームに email される。 - セキュリティパッチはこのエイリアス (stable@kernel.org) に送られるべ きではなく、代わりに security@kernel.org のアドレスに送られる。 diff --git a/Documentation/lguest/Makefile b/Documentation/lguest/Makefile index 725eef81cd48..1f4f9e888bd1 100644 --- a/Documentation/lguest/Makefile +++ b/Documentation/lguest/Makefile @@ -1,5 +1,5 @@ # This creates the demonstration utility "lguest" which runs a Linux guest. -CFLAGS:=-Wall -Wmissing-declarations -Wmissing-prototypes -O3 -I../../include -I../../arch/x86/include +CFLAGS:=-Wall -Wmissing-declarations -Wmissing-prototypes -O3 -I../../include -I../../arch/x86/include -U_FORTIFY_SOURCE LDLIBS:=-lz all: lguest diff --git a/Documentation/networking/alias.txt b/Documentation/networking/alias.txt index cd12c2ff518a..85046f53fcfc 100644 --- a/Documentation/networking/alias.txt +++ b/Documentation/networking/alias.txt @@ -2,13 +2,13 @@ IP-Aliasing: ============ -IP-aliases are additional IP-addresses/masks hooked up to a base -interface by adding a colon and a string when running ifconfig. -This string is usually numeric, but this is not a must. - -IP-Aliases are avail if CONFIG_INET (`standard' IPv4 networking) -is configured in the kernel. +IP-aliases are an obsolete way to manage multiple IP-addresses/masks +per interface. Newer tools such as iproute2 support multiple +address/prefixes per interface, but aliases are still supported +for backwards compatibility. +An alias is formed by adding a colon and a string when running ifconfig. +This string is usually numeric, but this is not a must. o Alias creation. Alias creation is done by 'magic' interface naming: eg. to create a @@ -38,16 +38,3 @@ o Relationship with main device If the base device is shut down the added aliases will be deleted too. - - -Contact -------- -Please finger or e-mail me: - Juan Jose Ciarlante <jjciarla@raiz.uncu.edu.ar> - -Updated by Erik Schoenfelder <schoenfr@gaertner.DE> - -; local variables: -; mode: indented-text -; mode: auto-fill -; end: diff --git a/Documentation/usb/dma.txt b/Documentation/usb/dma.txt index e8b50b7de9d9..cfdcd16e3abf 100644 --- a/Documentation/usb/dma.txt +++ b/Documentation/usb/dma.txt @@ -6,8 +6,9 @@ in the kernel usb programming guide (kerneldoc, from the source code). 
API OVERVIEW The big picture is that USB drivers can continue to ignore most DMA issues, -though they still must provide DMA-ready buffers (see DMA-mapping.txt). -That's how they've worked through the 2.4 (and earlier) kernels. +though they still must provide DMA-ready buffers (see +Documentation/PCI/PCI-DMA-mapping.txt). That's how they've worked through +the 2.4 (and earlier) kernels. OR: they can now be DMA-aware. @@ -62,8 +63,8 @@ and effects like cache-trashing can impose subtle penalties. force a consistent memory access ordering by using memory barriers. It's not using a streaming DMA mapping, so it's good for small transfers on systems where the I/O would otherwise thrash an IOMMU mapping. (See - Documentation/DMA-mapping.txt for definitions of "coherent" and "streaming" - DMA mappings.) + Documentation/PCI/PCI-DMA-mapping.txt for definitions of "coherent" and + "streaming" DMA mappings.) Asking for 1/Nth of a page (as well as asking for N pages) is reasonably space-efficient. @@ -93,7 +94,7 @@ WORKING WITH EXISTING BUFFERS Existing buffers aren't usable for DMA without first being mapped into the DMA address space of the device. However, most buffers passed to your driver can safely be used with such DMA mapping. (See the first section -of DMA-mapping.txt, titled "What memory is DMA-able?") +of Documentation/PCI/PCI-DMA-mapping.txt, titled "What memory is DMA-able?") - When you're using scatterlists, you can map everything at once. On some systems, this kicks in an IOMMU and turns the scatterlists into single diff --git a/MAINTAINERS b/MAINTAINERS index d992d407197b..474ec0c53272 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2836,8 +2836,6 @@ S: Maintained MAC80211 P: Johannes Berg M: johannes@sipsolutions.net -P: Michael Wu -M: flamingice@sourmilk.net L: linux-wireless@vger.kernel.org W: http://linuxwireless.org/ T: git kernel.org:/pub/scm/linux/kernel/git/linville/wireless-2.6.git diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 6110197757a3..9fb8aae5c391 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -8,6 +8,7 @@ config ALPHA select HAVE_AOUT select HAVE_IDE select HAVE_OPROFILE + select HAVE_SYSCALL_WRAPPERS help The Alpha is a 64-bit general-purpose processor designed and marketed by the Digital Equipment Corporation of blessed memory, diff --git a/arch/alpha/include/asm/bug.h b/arch/alpha/include/asm/bug.h index 695a5ee4b5d3..7b85b7c93709 100644 --- a/arch/alpha/include/asm/bug.h +++ b/arch/alpha/include/asm/bug.h @@ -8,17 +8,12 @@ /* ??? Would be nice to use .gprel32 here, but we can't be sure that the function loaded the GP, so this could fail in modules. */ -static inline void ATTRIB_NORET __BUG(const char *file, int line) -{ - __asm__ __volatile__( - "call_pal %0 # bugchk\n\t" - ".long %1\n\t.8byte %2" - : : "i" (PAL_bugchk), "i"(line), "i"(file)); - for ( ; ; ) - ; -} - -#define BUG() __BUG(__FILE__, __LINE__) +#define BUG() { \ + __asm__ __volatile__( \ + "call_pal %0 # bugchk\n\t" \ + ".long %1\n\t.8byte %2" \ + : : "i"(PAL_bugchk), "i"(__LINE__), "i"(__FILE__)); \ + for ( ; ; ); } #define HAVE_ARCH_BUG #endif diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h index a5801ae02e4b..04eb5681448c 100644 --- a/arch/alpha/include/asm/dma-mapping.h +++ b/arch/alpha/include/asm/dma-mapping.h @@ -29,6 +29,8 @@ #else /* no PCI - no IOMMU. 
*/ +#include <asm/io.h> /* for virt_to_phys() */ + struct scatterlist; void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp); diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S index aa2e50cf9857..e4a54b615894 100644 --- a/arch/alpha/kernel/entry.S +++ b/arch/alpha/kernel/entry.S @@ -933,7 +933,7 @@ sys_execve: osf_sigprocmask: .prologue 0 mov $sp, $18 - jmp $31, do_osf_sigprocmask + jmp $31, sys_osf_sigprocmask .end osf_sigprocmask .align 4 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index 18a3ea1aac51..ae41f097864b 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -54,8 +54,7 @@ extern int do_pipe(int *); * identical to OSF as we don't return 0 on success, but doing otherwise * would require changes to libc. Hopefully this is good enough. */ -asmlinkage unsigned long -osf_brk(unsigned long brk) +SYSCALL_DEFINE1(osf_brk, unsigned long, brk) { unsigned long retval = sys_brk(brk); if (brk && brk != retval) @@ -66,9 +65,9 @@ osf_brk(unsigned long brk) /* * This is pure guess-work.. */ -asmlinkage int -osf_set_program_attributes(unsigned long text_start, unsigned long text_len, - unsigned long bss_start, unsigned long bss_len) +SYSCALL_DEFINE4(osf_set_program_attributes, unsigned long, text_start, + unsigned long, text_len, unsigned long, bss_start, + unsigned long, bss_len) { struct mm_struct *mm; @@ -146,9 +145,9 @@ Efault: return -EFAULT; } -asmlinkage int -osf_getdirentries(unsigned int fd, struct osf_dirent __user *dirent, - unsigned int count, long __user *basep) +SYSCALL_DEFINE4(osf_getdirentries, unsigned int, fd, + struct osf_dirent __user *, dirent, unsigned int, count, + long __user *, basep) { int error; struct file *file; @@ -177,9 +176,9 @@ osf_getdirentries(unsigned int fd, struct osf_dirent __user *dirent, #undef NAME_OFFSET -asmlinkage unsigned long -osf_mmap(unsigned long addr, unsigned long len, unsigned long prot, - unsigned long flags, unsigned long fd, unsigned long off) +SYSCALL_DEFINE6(osf_mmap, unsigned long, addr, unsigned long, len, + unsigned long, prot, unsigned long, flags, unsigned long, fd, + unsigned long, off) { struct file *file = NULL; unsigned long ret = -EBADF; @@ -254,8 +253,8 @@ do_osf_statfs(struct dentry * dentry, struct osf_statfs __user *buffer, return error; } -asmlinkage int -osf_statfs(char __user *pathname, struct osf_statfs __user *buffer, unsigned long bufsiz) +SYSCALL_DEFINE3(osf_statfs, char __user *, pathname, + struct osf_statfs __user *, buffer, unsigned long, bufsiz) { struct path path; int retval; @@ -268,8 +267,8 @@ osf_statfs(char __user *pathname, struct osf_statfs __user *buffer, unsigned lon return retval; } -asmlinkage int -osf_fstatfs(unsigned long fd, struct osf_statfs __user *buffer, unsigned long bufsiz) +SYSCALL_DEFINE3(osf_fstatfs, unsigned long, fd, + struct osf_statfs __user *, buffer, unsigned long, bufsiz) { struct file *file; int retval; @@ -368,8 +367,8 @@ osf_procfs_mount(char *dirname, struct procfs_args __user *args, int flags) return do_mount("", dirname, "proc", flags, NULL); } -asmlinkage int -osf_mount(unsigned long typenr, char __user *path, int flag, void __user *data) +SYSCALL_DEFINE4(osf_mount, unsigned long, typenr, char __user *, path, + int, flag, void __user *, data) { int retval = -EINVAL; char *name; @@ -399,8 +398,7 @@ osf_mount(unsigned long typenr, char __user *path, int flag, void __user *data) return retval; } -asmlinkage int -osf_utsname(char __user *name) +SYSCALL_DEFINE1(osf_utsname, 
char __user *, name) { int error; @@ -423,14 +421,12 @@ osf_utsname(char __user *name) return error; } -asmlinkage unsigned long -sys_getpagesize(void) +SYSCALL_DEFINE0(getpagesize) { return PAGE_SIZE; } -asmlinkage unsigned long -sys_getdtablesize(void) +SYSCALL_DEFINE0(getdtablesize) { return sysctl_nr_open; } @@ -438,8 +434,7 @@ sys_getdtablesize(void) /* * For compatibility with OSF/1 only. Use utsname(2) instead. */ -asmlinkage int -osf_getdomainname(char __user *name, int namelen) +SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen) { unsigned len; int i; @@ -527,8 +522,8 @@ enum pl_code { PL_DEL = 5, PL_FDEL = 6 }; -asmlinkage long -osf_proplist_syscall(enum pl_code code, union pl_args __user *args) +SYSCALL_DEFINE2(osf_proplist_syscall, enum pl_code, code, + union pl_args __user *, args) { long error; int __user *min_buf_size_ptr; @@ -567,8 +562,8 @@ osf_proplist_syscall(enum pl_code code, union pl_args __user *args) return error; } -asmlinkage int -osf_sigstack(struct sigstack __user *uss, struct sigstack __user *uoss) +SYSCALL_DEFINE2(osf_sigstack, struct sigstack __user *, uss, + struct sigstack __user *, uoss) { unsigned long usp = rdusp(); unsigned long oss_sp = current->sas_ss_sp + current->sas_ss_size; @@ -608,8 +603,7 @@ osf_sigstack(struct sigstack __user *uss, struct sigstack __user *uoss) return error; } -asmlinkage long -osf_sysinfo(int command, char __user *buf, long count) +SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count) { char *sysinfo_table[] = { utsname()->sysname, @@ -647,9 +641,8 @@ osf_sysinfo(int command, char __user *buf, long count) return err; } -asmlinkage unsigned long -osf_getsysinfo(unsigned long op, void __user *buffer, unsigned long nbytes, - int __user *start, void __user *arg) +SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer, + unsigned long, nbytes, int __user *, start, void __user *, arg) { unsigned long w; struct percpu_struct *cpu; @@ -705,9 +698,8 @@ osf_getsysinfo(unsigned long op, void __user *buffer, unsigned long nbytes, return -EOPNOTSUPP; } -asmlinkage unsigned long -osf_setsysinfo(unsigned long op, void __user *buffer, unsigned long nbytes, - int __user *start, void __user *arg) +SYSCALL_DEFINE5(osf_setsysinfo, unsigned long, op, void __user *, buffer, + unsigned long, nbytes, int __user *, start, void __user *, arg) { switch (op) { case SSI_IEEE_FP_CONTROL: { @@ -880,8 +872,8 @@ jiffies_to_timeval32(unsigned long jiffies, struct timeval32 *value) value->tv_sec = jiffies / HZ; } -asmlinkage int -osf_gettimeofday(struct timeval32 __user *tv, struct timezone __user *tz) +SYSCALL_DEFINE2(osf_gettimeofday, struct timeval32 __user *, tv, + struct timezone __user *, tz) { if (tv) { struct timeval ktv; @@ -896,8 +888,8 @@ osf_gettimeofday(struct timeval32 __user *tv, struct timezone __user *tz) return 0; } -asmlinkage int -osf_settimeofday(struct timeval32 __user *tv, struct timezone __user *tz) +SYSCALL_DEFINE2(osf_settimeofday, struct timeval32 __user *, tv, + struct timezone __user *, tz) { struct timespec kts; struct timezone ktz; @@ -916,8 +908,7 @@ osf_settimeofday(struct timeval32 __user *tv, struct timezone __user *tz) return do_sys_settimeofday(tv ? &kts : NULL, tz ? 
&ktz : NULL); } -asmlinkage int -osf_getitimer(int which, struct itimerval32 __user *it) +SYSCALL_DEFINE2(osf_getitimer, int, which, struct itimerval32 __user *, it) { struct itimerval kit; int error; @@ -929,8 +920,8 @@ osf_getitimer(int which, struct itimerval32 __user *it) return error; } -asmlinkage int -osf_setitimer(int which, struct itimerval32 __user *in, struct itimerval32 __user *out) +SYSCALL_DEFINE3(osf_setitimer, int, which, struct itimerval32 __user *, in, + struct itimerval32 __user *, out) { struct itimerval kin, kout; int error; @@ -952,8 +943,8 @@ osf_setitimer(int which, struct itimerval32 __user *in, struct itimerval32 __use } -asmlinkage int -osf_utimes(char __user *filename, struct timeval32 __user *tvs) +SYSCALL_DEFINE2(osf_utimes, char __user *, filename, + struct timeval32 __user *, tvs) { struct timespec tv[2]; @@ -979,9 +970,8 @@ osf_utimes(char __user *filename, struct timeval32 __user *tvs) #define MAX_SELECT_SECONDS \ ((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1) -asmlinkage int -osf_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, - struct timeval32 __user *tvp) +SYSCALL_DEFINE5(osf_select, int, n, fd_set __user *, inp, fd_set __user *, outp, + fd_set __user *, exp, struct timeval32 __user *, tvp) { struct timespec end_time, *to = NULL; if (tvp) { @@ -1026,8 +1016,7 @@ struct rusage32 { long ru_nivcsw; /* involuntary " */ }; -asmlinkage int -osf_getrusage(int who, struct rusage32 __user *ru) +SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru) { struct rusage32 r; @@ -1053,9 +1042,8 @@ osf_getrusage(int who, struct rusage32 __user *ru) return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0; } -asmlinkage long -osf_wait4(pid_t pid, int __user *ustatus, int options, - struct rusage32 __user *ur) +SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options, + struct rusage32 __user *, ur) { struct rusage r; long ret, err; @@ -1101,8 +1089,8 @@ osf_wait4(pid_t pid, int __user *ustatus, int options, * seems to be a timeval pointer, and I suspect the second * one is the time remaining.. Ho humm.. No documentation. 
*/ -asmlinkage int -osf_usleep_thread(struct timeval32 __user *sleep, struct timeval32 __user *remain) +SYSCALL_DEFINE2(osf_usleep_thread, struct timeval32 __user *, sleep, + struct timeval32 __user *, remain) { struct timeval tmp; unsigned long ticks; @@ -1155,8 +1143,7 @@ struct timex32 { int :32; int :32; int :32; int :32; }; -asmlinkage int -sys_old_adjtimex(struct timex32 __user *txc_p) +SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p) { struct timex txc; int ret; @@ -1267,8 +1254,8 @@ osf_fix_iov_len(const struct iovec __user *iov, unsigned long count) return 0; } -asmlinkage ssize_t -osf_readv(unsigned long fd, const struct iovec __user * vector, unsigned long count) +SYSCALL_DEFINE3(osf_readv, unsigned long, fd, + const struct iovec __user *, vector, unsigned long, count) { if (unlikely(personality(current->personality) == PER_OSF4)) if (osf_fix_iov_len(vector, count)) @@ -1276,8 +1263,8 @@ osf_readv(unsigned long fd, const struct iovec __user * vector, unsigned long co return sys_readv(fd, vector, count); } -asmlinkage ssize_t -osf_writev(unsigned long fd, const struct iovec __user * vector, unsigned long count) +SYSCALL_DEFINE3(osf_writev, unsigned long, fd, + const struct iovec __user *, vector, unsigned long, count) { if (unlikely(personality(current->personality) == PER_OSF4)) if (osf_fix_iov_len(vector, count)) diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c index 8ac08311f5a5..c19a376520f4 100644 --- a/arch/alpha/kernel/pci-noop.c +++ b/arch/alpha/kernel/pci-noop.c @@ -109,7 +109,8 @@ sys_pciconfig_write(unsigned long bus, unsigned long dfn, /* Stubs for the routines in pci_iommu.c: */ void * -pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp) +__pci_alloc_consistent(struct pci_dev *pdev, size_t size, + dma_addr_t *dma_addrp, gfp_t gfp) { return NULL; } diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c index 410af4f3140e..df65eaa84c4c 100644 --- a/arch/alpha/kernel/signal.c +++ b/arch/alpha/kernel/signal.c @@ -19,6 +19,7 @@ #include <linux/tty.h> #include <linux/binfmts.h> #include <linux/bitops.h> +#include <linux/syscalls.h> #include <asm/uaccess.h> #include <asm/sigcontext.h> @@ -51,8 +52,8 @@ static void do_signal(struct pt_regs *, struct switch_stack *, * Note that we don't need to acquire the kernel lock for SMP * operation, as all of this is local to this thread. 
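The osf_* conversions above, and the signal.c and systbls.S hunks that follow, all apply the same pattern: the open-coded asmlinkage prototype is replaced by a SYSCALL_DEFINEn macro, which (with HAVE_SYSCALL_WRAPPERS now selected for alpha) emits the argument-sanitising sys_* wrapper and renames the symbol, hence the sys_osf_* entries in the syscall table. A minimal sketch with a made-up two-argument call:

    #include <linux/syscalls.h>

    /*
     * Hypothetical example, not part of the patch.  The old form
     *
     *      asmlinkage long osf_example(unsigned long arg, int flags);
     *
     * becomes:
     */
    SYSCALL_DEFINE2(osf_example, unsigned long, arg, int, flags)
    {
            /* body unchanged; the visible symbol is now sys_osf_example,
             * which is why systbls.S switches to the sys_osf_* names. */
            return 0;
    }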
*/ -asmlinkage unsigned long -do_osf_sigprocmask(int how, unsigned long newmask, struct pt_regs *regs) +SYSCALL_DEFINE3(osf_sigprocmask, int, how, unsigned long, newmask, + struct pt_regs *, regs) { unsigned long oldmask = -EINVAL; @@ -81,9 +82,9 @@ do_osf_sigprocmask(int how, unsigned long newmask, struct pt_regs *regs) return oldmask; } -asmlinkage int -osf_sigaction(int sig, const struct osf_sigaction __user *act, - struct osf_sigaction __user *oact) +SYSCALL_DEFINE3(osf_sigaction, int, sig, + const struct osf_sigaction __user *, act, + struct osf_sigaction __user *, oact) { struct k_sigaction new_ka, old_ka; int ret; @@ -112,10 +113,9 @@ osf_sigaction(int sig, const struct osf_sigaction __user *act, return ret; } -asmlinkage long -sys_rt_sigaction(int sig, const struct sigaction __user *act, - struct sigaction __user *oact, - size_t sigsetsize, void __user *restorer) +SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act, + struct sigaction __user *, oact, + size_t, sigsetsize, void __user *, restorer) { struct k_sigaction new_ka, old_ka; int ret; diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c index d953e510f68d..00f1dc3dfd5f 100644 --- a/arch/alpha/kernel/smp.c +++ b/arch/alpha/kernel/smp.c @@ -120,8 +120,9 @@ void __cpuinit smp_callin(void) { int cpuid = hard_smp_processor_id(); + cpumask_t mask = cpu_online_map; - if (cpu_test_and_set(cpuid, cpu_online_map)) { + if (cpu_test_and_set(cpuid, mask)) { printk("??, cpu 0x%x already present??\n", cpuid); BUG(); } diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S index 9d9e3a98bb95..95c9aef1c106 100644 --- a/arch/alpha/kernel/systbls.S +++ b/arch/alpha/kernel/systbls.S @@ -17,7 +17,7 @@ sys_call_table: .quad sys_write .quad alpha_ni_syscall /* 5 */ .quad sys_close - .quad osf_wait4 + .quad sys_osf_wait4 .quad alpha_ni_syscall .quad sys_link .quad sys_unlink /* 10 */ @@ -27,11 +27,11 @@ sys_call_table: .quad sys_mknod .quad sys_chmod /* 15 */ .quad sys_chown - .quad osf_brk + .quad sys_osf_brk .quad alpha_ni_syscall .quad sys_lseek .quad sys_getxpid /* 20 */ - .quad osf_mount + .quad sys_osf_mount .quad sys_umount .quad sys_setuid .quad sys_getxuid @@ -53,7 +53,7 @@ sys_call_table: .quad alpha_ni_syscall /* 40 */ .quad sys_dup .quad sys_alpha_pipe - .quad osf_set_program_attributes + .quad sys_osf_set_program_attributes .quad alpha_ni_syscall .quad sys_open /* 45 */ .quad alpha_ni_syscall @@ -81,7 +81,7 @@ sys_call_table: .quad sys_newlstat .quad alpha_ni_syscall .quad alpha_ni_syscall /* 70 */ - .quad osf_mmap + .quad sys_osf_mmap .quad alpha_ni_syscall .quad sys_munmap .quad sys_mprotect @@ -94,17 +94,17 @@ sys_call_table: .quad sys_setgroups /* 80 */ .quad alpha_ni_syscall .quad sys_setpgid - .quad osf_setitimer + .quad sys_osf_setitimer .quad alpha_ni_syscall .quad alpha_ni_syscall /* 85 */ - .quad osf_getitimer + .quad sys_osf_getitimer .quad sys_gethostname .quad sys_sethostname .quad sys_getdtablesize .quad sys_dup2 /* 90 */ .quad sys_newfstat .quad sys_fcntl - .quad osf_select + .quad sys_osf_select .quad sys_poll .quad sys_fsync /* 95 */ .quad sys_setpriority @@ -123,22 +123,22 @@ sys_call_table: .quad alpha_ni_syscall .quad alpha_ni_syscall /* 110 */ .quad sys_sigsuspend - .quad osf_sigstack + .quad sys_osf_sigstack .quad sys_recvmsg .quad sys_sendmsg .quad alpha_ni_syscall /* 115 */ - .quad osf_gettimeofday - .quad osf_getrusage + .quad sys_osf_gettimeofday + .quad sys_osf_getrusage .quad sys_getsockopt .quad alpha_ni_syscall #ifdef CONFIG_OSF4_COMPAT - .quad osf_readv 
/* 120 */ - .quad osf_writev + .quad sys_osf_readv /* 120 */ + .quad sys_osf_writev #else .quad sys_readv /* 120 */ .quad sys_writev #endif - .quad osf_settimeofday + .quad sys_osf_settimeofday .quad sys_fchown .quad sys_fchmod .quad sys_recvfrom /* 125 */ @@ -154,7 +154,7 @@ sys_call_table: .quad sys_socketpair /* 135 */ .quad sys_mkdir .quad sys_rmdir - .quad osf_utimes + .quad sys_osf_utimes .quad alpha_ni_syscall .quad alpha_ni_syscall /* 140 */ .quad sys_getpeername @@ -172,16 +172,16 @@ sys_call_table: .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall /* 155 */ - .quad osf_sigaction + .quad sys_osf_sigaction .quad alpha_ni_syscall .quad alpha_ni_syscall - .quad osf_getdirentries - .quad osf_statfs /* 160 */ - .quad osf_fstatfs + .quad sys_osf_getdirentries + .quad sys_osf_statfs /* 160 */ + .quad sys_osf_fstatfs .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall - .quad osf_getdomainname /* 165 */ + .quad sys_osf_getdomainname /* 165 */ .quad sys_setdomainname .quad alpha_ni_syscall .quad alpha_ni_syscall @@ -224,7 +224,7 @@ sys_call_table: .quad sys_semctl .quad sys_semget /* 205 */ .quad sys_semop - .quad osf_utsname + .quad sys_osf_utsname .quad sys_lchown .quad sys_shmat .quad sys_shmctl /* 210 */ @@ -258,23 +258,23 @@ sys_call_table: .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall /* 240 */ - .quad osf_sysinfo + .quad sys_osf_sysinfo .quad alpha_ni_syscall .quad alpha_ni_syscall - .quad osf_proplist_syscall + .quad sys_osf_proplist_syscall .quad alpha_ni_syscall /* 245 */ .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall /* 250 */ - .quad osf_usleep_thread + .quad sys_osf_usleep_thread .quad alpha_ni_syscall .quad alpha_ni_syscall .quad sys_sysfs .quad alpha_ni_syscall /* 255 */ - .quad osf_getsysinfo - .quad osf_setsysinfo + .quad sys_osf_getsysinfo + .quad sys_osf_setsysinfo .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall /* 260 */ diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index d98f0f4ff83f..6d5e6c5630e3 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -906,7 +906,7 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) * @dir: R/W or both. * @attrs: optional dma attributes * - * See Documentation/DMA-mapping.txt + * See Documentation/PCI/PCI-DMA-mapping.txt */ dma_addr_t sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir, @@ -1024,7 +1024,7 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size) * @dir: R/W or both. * @attrs: optional dma attributes * - * See Documentation/DMA-mapping.txt + * See Documentation/PCI/PCI-DMA-mapping.txt */ void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size, int dir, struct dma_attrs *attrs) @@ -1102,7 +1102,7 @@ EXPORT_SYMBOL(sba_unmap_single_attrs); * @size: number of bytes mapped in driver buffer. * @dma_handle: IOVA of new buffer. * - * See Documentation/DMA-mapping.txt + * See Documentation/PCI/PCI-DMA-mapping.txt */ void * sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags) @@ -1165,7 +1165,7 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp * @vaddr: virtual address IOVA of "consistent" buffer. * @dma_handler: IO virtual address of "consistent" buffer. 
* - * See Documentation/DMA-mapping.txt + * See Documentation/PCI/PCI-DMA-mapping.txt */ void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) { @@ -1420,7 +1420,7 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev, * @dir: R/W or both. * @attrs: optional dma attributes * - * See Documentation/DMA-mapping.txt + * See Documentation/PCI/PCI-DMA-mapping.txt */ int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents, int dir, struct dma_attrs *attrs) @@ -1512,7 +1512,7 @@ EXPORT_SYMBOL(sba_map_sg_attrs); * @dir: R/W or both. * @attrs: optional dma attributes * - * See Documentation/DMA-mapping.txt + * See Documentation/PCI/PCI-DMA-mapping.txt */ void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents, int dir, struct dma_attrs *attrs) diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h index 53af696f23d2..da6943380908 100644 --- a/arch/parisc/include/asm/dma-mapping.h +++ b/arch/parisc/include/asm/dma-mapping.h @@ -5,7 +5,7 @@ #include <asm/cacheflush.h> #include <asm/scatterlist.h> -/* See Documentation/DMA-mapping.txt */ +/* See Documentation/PCI/PCI-DMA-mapping.txt */ struct hppa_dma_ops { int (*dma_supported)(struct device *dev, u64 mask); void *(*alloc_consistent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag); diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c index ccd61b9567a6..df47895db828 100644 --- a/arch/parisc/kernel/pci-dma.c +++ b/arch/parisc/kernel/pci-dma.c @@ -2,7 +2,7 @@ ** PARISC 1.1 Dynamic DMA mapping support. ** This implementation is for PA-RISC platforms that do not support ** I/O TLBs (aka DMA address translation hardware). -** See Documentation/DMA-mapping.txt for interface definitions. +** See Documentation/PCI/PCI-DMA-mapping.txt for interface definitions. ** ** (c) Copyright 1999,2000 Hewlett-Packard Company ** (c) Copyright 2000 Grant Grundler diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 4035357f5b9d..132a134d12f2 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -2,8 +2,8 @@ #define _ASM_X86_DMA_MAPPING_H /* - * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for - * documentation. + * IOMMU interface. See Documentation/PCI/PCI-DMA-mapping.txt and + * Documentation/DMA-API.txt for documentation. */ #include <linux/scatterlist.h> diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index 00c2bcd41463..d5768b1af080 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c @@ -5,7 +5,7 @@ * This allows to use PCI devices that only support 32bit addresses on systems * with more than 4GB. * - * See Documentation/DMA-mapping.txt for the interface specification. + * See Documentation/PCI/PCI-DMA-mapping.txt for the interface specification. * * Copyright 2002 Andi Kleen, SuSE Labs. * Subject to the GNU General Public License v2 only. diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index a7ed208f81e3..92f1c6f3e19d 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -931,7 +931,7 @@ static void lguest_restart(char *reason) * that we can fit comfortably. * * First we need assembly templates of each of the patchable Guest operations, - * and these are in lguest_asm.S. */ + * and these are in i386_head.S. 
*/ /*G:060 We construct a table from the assembler templates: */ static const struct lguest_insns @@ -1093,7 +1093,7 @@ __init void lguest_init(void) acpi_ht = 0; #endif - /* We set the perferred console to "hvc". This is the "hypervisor + /* We set the preferred console to "hvc". This is the "hypervisor * virtual console" driver written by the PowerPC people, which we also * adapted for lguest's use. */ add_preferred_console("hvc", 0, NULL); diff --git a/block/blk-barrier.c b/block/blk-barrier.c index 8eba4e43bb0c..f7dae57e6cab 100644 --- a/block/blk-barrier.c +++ b/block/blk-barrier.c @@ -302,7 +302,7 @@ static void bio_end_empty_barrier(struct bio *bio, int err) * Description: * Issue a flush for the block device in question. Caller can supply * room for storing the error offset in case of a flush error, if they - * wish to. Caller must run wait_for_completion() on its own. + * wish to. */ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector) { diff --git a/block/blk-core.c b/block/blk-core.c index a824e49c0d0a..ca69f3d94100 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -64,11 +64,12 @@ static struct workqueue_struct *kblockd_workqueue; static void drive_stat_acct(struct request *rq, int new_io) { + struct gendisk *disk = rq->rq_disk; struct hd_struct *part; int rw = rq_data_dir(rq); int cpu; - if (!blk_fs_request(rq) || !rq->rq_disk) + if (!blk_fs_request(rq) || !disk || !blk_queue_io_stat(disk->queue)) return; cpu = part_stat_lock(); @@ -599,8 +600,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) q->request_fn = rfn; q->prep_rq_fn = NULL; q->unplug_fn = generic_unplug_device; - q->queue_flags = (1 << QUEUE_FLAG_CLUSTER | - 1 << QUEUE_FLAG_STACKABLE); + q->queue_flags = QUEUE_FLAG_DEFAULT; q->queue_lock = lock; blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK); @@ -1125,6 +1125,8 @@ void init_request_from_bio(struct request *req, struct bio *bio) if (bio_sync(bio)) req->cmd_flags |= REQ_RW_SYNC; + if (bio_unplug(bio)) + req->cmd_flags |= REQ_UNPLUG; if (bio_rw_meta(bio)) req->cmd_flags |= REQ_RW_META; @@ -1141,6 +1143,7 @@ static int __make_request(struct request_queue *q, struct bio *bio) int el_ret, nr_sectors; const unsigned short prio = bio_prio(bio); const int sync = bio_sync(bio); + const int unplug = bio_unplug(bio); int rw_flags; nr_sectors = bio_sectors(bio); @@ -1244,7 +1247,7 @@ get_rq: blk_plug_device(q); add_request(q, req); out: - if (sync || blk_queue_nonrot(q)) + if (unplug || blk_queue_nonrot(q)) __generic_unplug_device(q); spin_unlock_irq(q->queue_lock); return 0; @@ -1448,6 +1451,11 @@ static inline void __generic_make_request(struct bio *bio) err = -EOPNOTSUPP; goto end_io; } + if (bio_barrier(bio) && bio_has_data(bio) && + (q->next_ordered == QUEUE_ORDERED_NONE)) { + err = -EOPNOTSUPP; + goto end_io; + } ret = q->make_request_fn(q, bio); } while (ret); @@ -1655,6 +1663,55 @@ void blkdev_dequeue_request(struct request *req) } EXPORT_SYMBOL(blkdev_dequeue_request); +static void blk_account_io_completion(struct request *req, unsigned int bytes) +{ + struct gendisk *disk = req->rq_disk; + + if (!disk || !blk_queue_io_stat(disk->queue)) + return; + + if (blk_fs_request(req)) { + const int rw = rq_data_dir(req); + struct hd_struct *part; + int cpu; + + cpu = part_stat_lock(); + part = disk_map_sector_rcu(req->rq_disk, req->sector); + part_stat_add(cpu, part, sectors[rw], bytes >> 9); + part_stat_unlock(); + } +} + +static void blk_account_io_done(struct request *req) +{ + struct gendisk *disk = req->rq_disk; + 
+ if (!disk || !blk_queue_io_stat(disk->queue)) + return; + + /* + * Account IO completion. bar_rq isn't accounted as a normal + * IO on queueing nor completion. Accounting the containing + * request is enough. + */ + if (blk_fs_request(req) && req != &req->q->bar_rq) { + unsigned long duration = jiffies - req->start_time; + const int rw = rq_data_dir(req); + struct hd_struct *part; + int cpu; + + cpu = part_stat_lock(); + part = disk_map_sector_rcu(disk, req->sector); + + part_stat_inc(cpu, part, ios[rw]); + part_stat_add(cpu, part, ticks[rw], duration); + part_round_stats(cpu, part); + part_dec_in_flight(part); + + part_stat_unlock(); + } +} + /** * __end_that_request_first - end I/O on a request * @req: the request being processed @@ -1690,16 +1747,7 @@ static int __end_that_request_first(struct request *req, int error, (unsigned long long)req->sector); } - if (blk_fs_request(req) && req->rq_disk) { - const int rw = rq_data_dir(req); - struct hd_struct *part; - int cpu; - - cpu = part_stat_lock(); - part = disk_map_sector_rcu(req->rq_disk, req->sector); - part_stat_add(cpu, part, sectors[rw], nr_bytes >> 9); - part_stat_unlock(); - } + blk_account_io_completion(req, nr_bytes); total_bytes = bio_nbytes = 0; while ((bio = req->bio) != NULL) { @@ -1779,8 +1827,6 @@ static int __end_that_request_first(struct request *req, int error, */ static void end_that_request_last(struct request *req, int error) { - struct gendisk *disk = req->rq_disk; - if (blk_rq_tagged(req)) blk_queue_end_tag(req->q, req); @@ -1792,27 +1838,7 @@ static void end_that_request_last(struct request *req, int error) blk_delete_timer(req); - /* - * Account IO completion. bar_rq isn't accounted as a normal - * IO on queueing nor completion. Accounting the containing - * request is enough. - */ - if (disk && blk_fs_request(req) && req != &req->q->bar_rq) { - unsigned long duration = jiffies - req->start_time; - const int rw = rq_data_dir(req); - struct hd_struct *part; - int cpu; - - cpu = part_stat_lock(); - part = disk_map_sector_rcu(disk, req->sector); - - part_stat_inc(cpu, part, ios[rw]); - part_stat_add(cpu, part, ticks[rw], duration); - part_round_stats(cpu, part); - part_dec_in_flight(part); - - part_stat_unlock(); - } + blk_account_io_done(req); if (req->end_io) req->end_io(req, error); diff --git a/block/blk-integrity.c b/block/blk-integrity.c index 61a8e2f8fdd0..91fa8e06b6a5 100644 --- a/block/blk-integrity.c +++ b/block/blk-integrity.c @@ -309,24 +309,24 @@ static struct kobj_type integrity_ktype = { /** * blk_integrity_register - Register a gendisk as being integrity-capable * @disk: struct gendisk pointer to make integrity-aware - * @template: integrity profile + * @template: optional integrity profile to register * * Description: When a device needs to advertise itself as being able * to send/receive integrity metadata it must use this function to * register the capability with the block layer. The template is a * blk_integrity struct with values appropriate for the underlying - * hardware. See Documentation/block/data-integrity.txt. + * hardware. If template is NULL the new profile is allocated but + * not filled out. See Documentation/block/data-integrity.txt. 
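With the relaxed check above, template may now be NULL: the block layer allocates a profile named "unsupported" that the caller can fill in later. A hedged usage sketch from a hypothetical driver's probe path:

    #include <linux/genhd.h>
    #include <linux/blkdev.h>

    /* Hypothetical helper: advertise integrity capability before the
     * actual template is known. */
    static void mydrv_register_integrity(struct gendisk *disk)
    {
            /* A NULL template no longer hits a BUG_ON(); the profile is
             * created and named "unsupported" until a real template is
             * registered. */
            if (blk_integrity_register(disk, NULL))
                    printk(KERN_WARNING "%s: integrity registration failed\n",
                           disk->disk_name);
    }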
*/ int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template) { struct blk_integrity *bi; BUG_ON(disk == NULL); - BUG_ON(template == NULL); if (disk->integrity == NULL) { bi = kmem_cache_alloc(integrity_cachep, - GFP_KERNEL | __GFP_ZERO); + GFP_KERNEL | __GFP_ZERO); if (!bi) return -1; @@ -346,13 +346,16 @@ int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template) bi = disk->integrity; /* Use the provided profile as template */ - bi->name = template->name; - bi->generate_fn = template->generate_fn; - bi->verify_fn = template->verify_fn; - bi->tuple_size = template->tuple_size; - bi->set_tag_fn = template->set_tag_fn; - bi->get_tag_fn = template->get_tag_fn; - bi->tag_size = template->tag_size; + if (template != NULL) { + bi->name = template->name; + bi->generate_fn = template->generate_fn; + bi->verify_fn = template->verify_fn; + bi->tuple_size = template->tuple_size; + bi->set_tag_fn = template->set_tag_fn; + bi->get_tag_fn = template->get_tag_fn; + bi->tag_size = template->tag_size; + } else + bi->name = "unsupported"; return 0; } diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index a29cb788e408..e29ddfc73cf4 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -130,6 +130,27 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page) return queue_var_show(max_hw_sectors_kb, (page)); } +static ssize_t queue_nonrot_show(struct request_queue *q, char *page) +{ + return queue_var_show(!blk_queue_nonrot(q), page); +} + +static ssize_t queue_nonrot_store(struct request_queue *q, const char *page, + size_t count) +{ + unsigned long nm; + ssize_t ret = queue_var_store(&nm, page, count); + + spin_lock_irq(q->queue_lock); + if (nm) + queue_flag_clear(QUEUE_FLAG_NONROT, q); + else + queue_flag_set(QUEUE_FLAG_NONROT, q); + spin_unlock_irq(q->queue_lock); + + return ret; +} + static ssize_t queue_nomerges_show(struct request_queue *q, char *page) { return queue_var_show(blk_queue_nomerges(q), page); @@ -146,8 +167,8 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page, queue_flag_set(QUEUE_FLAG_NOMERGES, q); else queue_flag_clear(QUEUE_FLAG_NOMERGES, q); - spin_unlock_irq(q->queue_lock); + return ret; } @@ -176,6 +197,27 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count) return ret; } +static ssize_t queue_iostats_show(struct request_queue *q, char *page) +{ + return queue_var_show(blk_queue_io_stat(q), page); +} + +static ssize_t queue_iostats_store(struct request_queue *q, const char *page, + size_t count) +{ + unsigned long stats; + ssize_t ret = queue_var_store(&stats, page, count); + + spin_lock_irq(q->queue_lock); + if (stats) + queue_flag_set(QUEUE_FLAG_IO_STAT, q); + else + queue_flag_clear(QUEUE_FLAG_IO_STAT, q); + spin_unlock_irq(q->queue_lock); + + return ret; +} + static struct queue_sysfs_entry queue_requests_entry = { .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR }, .show = queue_requests_show, @@ -210,6 +252,12 @@ static struct queue_sysfs_entry queue_hw_sector_size_entry = { .show = queue_hw_sector_size_show, }; +static struct queue_sysfs_entry queue_nonrot_entry = { + .attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR }, + .show = queue_nonrot_show, + .store = queue_nonrot_store, +}; + static struct queue_sysfs_entry queue_nomerges_entry = { .attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR }, .show = queue_nomerges_show, @@ -222,6 +270,12 @@ static struct queue_sysfs_entry queue_rq_affinity_entry = { .store = 
queue_rq_affinity_store, }; +static struct queue_sysfs_entry queue_iostats_entry = { + .attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR }, + .show = queue_iostats_show, + .store = queue_iostats_store, +}; + static struct attribute *default_attrs[] = { &queue_requests_entry.attr, &queue_ra_entry.attr, @@ -229,8 +283,10 @@ static struct attribute *default_attrs[] = { &queue_max_sectors_entry.attr, &queue_iosched_entry.attr, &queue_hw_sector_size_entry.attr, + &queue_nonrot_entry.attr, &queue_nomerges_entry.attr, &queue_rq_affinity_entry.attr, + &queue_iostats_entry.attr, NULL, }; diff --git a/block/blktrace.c b/block/blktrace.c index b0a2cae886db..39cc3bfe56e4 100644 --- a/block/blktrace.c +++ b/block/blktrace.c @@ -187,59 +187,12 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, static struct dentry *blk_tree_root; static DEFINE_MUTEX(blk_tree_mutex); -static unsigned int root_users; - -static inline void blk_remove_root(void) -{ - if (blk_tree_root) { - debugfs_remove(blk_tree_root); - blk_tree_root = NULL; - } -} - -static void blk_remove_tree(struct dentry *dir) -{ - mutex_lock(&blk_tree_mutex); - debugfs_remove(dir); - if (--root_users == 0) - blk_remove_root(); - mutex_unlock(&blk_tree_mutex); -} - -static struct dentry *blk_create_tree(const char *blk_name) -{ - struct dentry *dir = NULL; - int created = 0; - - mutex_lock(&blk_tree_mutex); - - if (!blk_tree_root) { - blk_tree_root = debugfs_create_dir("block", NULL); - if (!blk_tree_root) - goto err; - created = 1; - } - - dir = debugfs_create_dir(blk_name, blk_tree_root); - if (dir) - root_users++; - else { - /* Delete root only if we created it */ - if (created) - blk_remove_root(); - } - -err: - mutex_unlock(&blk_tree_mutex); - return dir; -} static void blk_trace_cleanup(struct blk_trace *bt) { - relay_close(bt->rchan); debugfs_remove(bt->msg_file); debugfs_remove(bt->dropped_file); - blk_remove_tree(bt->dir); + relay_close(bt->rchan); free_percpu(bt->sequence); free_percpu(bt->msg_data); kfree(bt); @@ -346,7 +299,18 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf, static int blk_remove_buf_file_callback(struct dentry *dentry) { + struct dentry *parent = dentry->d_parent; debugfs_remove(dentry); + + /* + * this will fail for all but the last file, but that is ok. what we + * care about is the top level buts->name directory going away, when + * the last trace file is gone. Then we don't have to rmdir() that + * manually on trace stop, so it nicely solves the issue with + * force killing of running traces. + */ + + debugfs_remove(parent); return 0; } @@ -404,7 +368,15 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, goto err; ret = -ENOENT; - dir = blk_create_tree(buts->name); + + if (!blk_tree_root) { + blk_tree_root = debugfs_create_dir("block", NULL); + if (!blk_tree_root) + return -ENOMEM; + } + + dir = debugfs_create_dir(buts->name, blk_tree_root); + if (!dir) goto err; @@ -458,8 +430,6 @@ probe_err: atomic_dec(&blk_probes_ref); mutex_unlock(&blk_probe_mutex); err: - if (dir) - blk_remove_tree(dir); if (bt) { if (bt->msg_file) debugfs_remove(bt->msg_file); diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index e8525fa72823..664ebfd092ec 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -84,6 +84,11 @@ struct cfq_data { */ struct cfq_rb_root service_tree; unsigned int busy_queues; + /* + * Used to track any pending rt requests so we can pre-empt current + * non-RT cfqq in service when this value is non-zero. 
+ */ + unsigned int busy_rt_queues; int rq_in_driver; int sync_flight; @@ -562,6 +567,8 @@ static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) BUG_ON(cfq_cfqq_on_rr(cfqq)); cfq_mark_cfqq_on_rr(cfqq); cfqd->busy_queues++; + if (cfq_class_rt(cfqq)) + cfqd->busy_rt_queues++; cfq_resort_rr_list(cfqd, cfqq); } @@ -581,6 +588,8 @@ static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) BUG_ON(!cfqd->busy_queues); cfqd->busy_queues--; + if (cfq_class_rt(cfqq)) + cfqd->busy_rt_queues--; } /* @@ -1005,6 +1014,20 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) goto expire; /* + * If we have a RT cfqq waiting, then we pre-empt the current non-rt + * cfqq. + */ + if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues) { + /* + * We simulate this as cfqq timed out so that it gets to bank + * the remaining of its time slice. + */ + cfq_log_cfqq(cfqd, cfqq, "preempt"); + cfq_slice_expired(cfqd, 1); + goto new_queue; + } + + /* * The active queue has requests and isn't expired, allow it to * dispatch. */ @@ -1067,6 +1090,13 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq, if (RB_EMPTY_ROOT(&cfqq->sort_list)) break; + /* + * If there is a non-empty RT cfqq waiting for current + * cfqq's timeslice to complete, pre-empt this cfqq + */ + if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues) + break; + } while (dispatched < max_dispatch); /* @@ -1801,6 +1831,12 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, if (rq_is_meta(rq) && !cfqq->meta_pending) return 1; + /* + * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice. + */ + if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq)) + return 1; + if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq)) return 0; @@ -1870,7 +1906,8 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, /* * not the active queue - expire current slice if it is * idle and has expired it's mean thinktime or this new queue - * has some old slice time left and is of higher priority + * has some old slice time left and is of higher priority or + * this new queue is RT and the current one is BE */ cfq_preempt_queue(cfqd, cfqq); cfq_mark_cfqq_must_dispatch(cfqq); diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c index bfd55b085ae6..9f029595f454 100644 --- a/drivers/ata/sata_sil.c +++ b/drivers/ata/sata_sil.c @@ -44,6 +44,7 @@ #include <linux/device.h> #include <scsi/scsi_host.h> #include <linux/libata.h> +#include <linux/dmi.h> #define DRV_NAME "sata_sil" #define DRV_VERSION "2.4" diff --git a/drivers/base/core.c b/drivers/base/core.c index 55e530942ab0..f3eae630e589 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -1280,7 +1280,7 @@ EXPORT_SYMBOL_GPL(__root_device_register); /** * root_device_unregister - unregister and free a root device - * @root: device going away. + * @dev: device going away * * This function unregisters and cleans up a device that was created by * root_device_register(). 
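The CFQ changes above only matter for queues in the real-time I/O class. For reference, a userspace task ends up in an RT cfqq by setting its I/O priority, e.g. via the ioprio_set() system call; the constants below mirror linux/ioprio.h, which is not normally exported to userspace, so they are copied here as an assumption:

    #include <unistd.h>
    #include <sys/syscall.h>

    #define IOPRIO_CLASS_SHIFT      13
    #define IOPRIO_PRIO_VALUE(class, data) (((class) << IOPRIO_CLASS_SHIFT) | (data))
    #define IOPRIO_CLASS_RT         1
    #define IOPRIO_WHO_PROCESS      1

    int main(void)
    {
            /* priority 4 within the RT class, applied to the calling
             * process (who == IOPRIO_WHO_PROCESS, pid 0 == self) */
            return syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
                           IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 4));
    }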
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c index 6bd91a15d5e6..7be2cf3514e7 100644 --- a/drivers/firewire/fw-card.c +++ b/drivers/firewire/fw-card.c @@ -232,7 +232,7 @@ fw_card_bm_work(struct work_struct *work) root_id = root_node->node_id; grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10)); - if (card->bm_generation + 1 == generation || + if (is_next_generation(generation, card->bm_generation) || (card->bm_generation != generation && grace)) { /* * This first step is to figure out who is IRM and @@ -512,7 +512,7 @@ fw_core_remove_card(struct fw_card *card) fw_core_initiate_bus_reset(card, 1); mutex_lock(&card_mutex); - list_del(&card->link); + list_del_init(&card->link); mutex_unlock(&card_mutex); /* Set up the dummy driver. */ diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c index 2af5a8d1e012..bf53acb45652 100644 --- a/drivers/firewire/fw-device.c +++ b/drivers/firewire/fw-device.c @@ -25,6 +25,7 @@ #include <linux/device.h> #include <linux/delay.h> #include <linux/idr.h> +#include <linux/jiffies.h> #include <linux/string.h> #include <linux/rwsem.h> #include <linux/semaphore.h> @@ -634,12 +635,39 @@ struct fw_device *fw_device_get_by_devt(dev_t devt) return device; } +/* + * These defines control the retry behavior for reading the config + * rom. It shouldn't be necessary to tweak these; if the device + * doesn't respond to a config rom read within 10 seconds, it's not + * going to respond at all. As for the initial delay, a lot of + * devices will be able to respond within half a second after bus + * reset. On the other hand, it's not really worth being more + * aggressive than that, since it scales pretty well; if 10 devices + * are plugged in, they're all getting read within one second. + */ + +#define MAX_RETRIES 10 +#define RETRY_DELAY (3 * HZ) +#define INITIAL_DELAY (HZ / 2) +#define SHUTDOWN_DELAY (2 * HZ) + static void fw_device_shutdown(struct work_struct *work) { struct fw_device *device = container_of(work, struct fw_device, work.work); int minor = MINOR(device->device.devt); + if (time_is_after_jiffies(device->card->reset_jiffies + SHUTDOWN_DELAY) + && !list_empty(&device->card->link)) { + schedule_delayed_work(&device->work, SHUTDOWN_DELAY); + return; + } + + if (atomic_cmpxchg(&device->state, + FW_DEVICE_GONE, + FW_DEVICE_SHUTDOWN) != FW_DEVICE_GONE) + return; + fw_device_cdev_remove(device); device_for_each_child(&device->device, NULL, shutdown_unit); device_unregister(&device->device); @@ -647,6 +675,7 @@ static void fw_device_shutdown(struct work_struct *work) down_write(&fw_device_rwsem); idr_remove(&fw_device_idr, minor); up_write(&fw_device_rwsem); + fw_device_put(device); } @@ -654,25 +683,63 @@ static struct device_type fw_device_type = { .release = fw_device_release, }; +static void fw_device_update(struct work_struct *work); + /* - * These defines control the retry behavior for reading the config - * rom. It shouldn't be necessary to tweak these; if the device - * doesn't respond to a config rom read within 10 seconds, it's not - * going to respond at all. As for the initial delay, a lot of - * devices will be able to respond within half a second after bus - * reset. On the other hand, it's not really worth being more - * aggressive than that, since it scales pretty well; if 10 devices - * are plugged in, they're all getting read within one second. 
+ * If a device was pending for deletion because its node went away but its + * bus info block and root directory header matches that of a newly discovered + * device, revive the existing fw_device. + * The newly allocated fw_device becomes obsolete instead. */ +static int lookup_existing_device(struct device *dev, void *data) +{ + struct fw_device *old = fw_device(dev); + struct fw_device *new = data; + struct fw_card *card = new->card; + int match = 0; + + down_read(&fw_device_rwsem); /* serialize config_rom access */ + spin_lock_irq(&card->lock); /* serialize node access */ + + if (memcmp(old->config_rom, new->config_rom, 6 * 4) == 0 && + atomic_cmpxchg(&old->state, + FW_DEVICE_GONE, + FW_DEVICE_RUNNING) == FW_DEVICE_GONE) { + struct fw_node *current_node = new->node; + struct fw_node *obsolete_node = old->node; + + new->node = obsolete_node; + new->node->data = new; + old->node = current_node; + old->node->data = old; + + old->max_speed = new->max_speed; + old->node_id = current_node->node_id; + smp_wmb(); /* update node_id before generation */ + old->generation = card->generation; + old->config_rom_retries = 0; + fw_notify("rediscovered device %s\n", dev_name(dev)); -#define MAX_RETRIES 10 -#define RETRY_DELAY (3 * HZ) -#define INITIAL_DELAY (HZ / 2) + PREPARE_DELAYED_WORK(&old->work, fw_device_update); + schedule_delayed_work(&old->work, 0); + + if (current_node == card->root_node) + fw_schedule_bm_work(card, 0); + + match = 1; + } + + spin_unlock_irq(&card->lock); + up_read(&fw_device_rwsem); + + return match; +} static void fw_device_init(struct work_struct *work) { struct fw_device *device = container_of(work, struct fw_device, work.work); + struct device *revived_dev; int minor, err; /* @@ -696,6 +763,15 @@ static void fw_device_init(struct work_struct *work) return; } + revived_dev = device_find_child(device->card->device, + device, lookup_existing_device); + if (revived_dev) { + put_device(revived_dev); + fw_device_release(&device->device); + + return; + } + device_initialize(&device->device); fw_device_get(device); @@ -734,9 +810,10 @@ static void fw_device_init(struct work_struct *work) * fw_node_event(). 
*/ if (atomic_cmpxchg(&device->state, - FW_DEVICE_INITIALIZING, - FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN) { - fw_device_shutdown(work); + FW_DEVICE_INITIALIZING, + FW_DEVICE_RUNNING) == FW_DEVICE_GONE) { + PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown); + schedule_delayed_work(&device->work, SHUTDOWN_DELAY); } else { if (device->config_rom_retries) fw_notify("created device %s: GUID %08x%08x, S%d00, " @@ -847,8 +924,8 @@ static void fw_device_refresh(struct work_struct *work) case REREAD_BIB_UNCHANGED: if (atomic_cmpxchg(&device->state, - FW_DEVICE_INITIALIZING, - FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN) + FW_DEVICE_INITIALIZING, + FW_DEVICE_RUNNING) == FW_DEVICE_GONE) goto gone; fw_device_update(work); @@ -879,8 +956,8 @@ static void fw_device_refresh(struct work_struct *work) create_units(device); if (atomic_cmpxchg(&device->state, - FW_DEVICE_INITIALIZING, - FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN) + FW_DEVICE_INITIALIZING, + FW_DEVICE_RUNNING) == FW_DEVICE_GONE) goto gone; fw_notify("refreshed device %s\n", dev_name(&device->device)); @@ -890,8 +967,9 @@ static void fw_device_refresh(struct work_struct *work) give_up: fw_notify("giving up on refresh of device %s\n", dev_name(&device->device)); gone: - atomic_set(&device->state, FW_DEVICE_SHUTDOWN); - fw_device_shutdown(work); + atomic_set(&device->state, FW_DEVICE_GONE); + PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown); + schedule_delayed_work(&device->work, SHUTDOWN_DELAY); out: if (node_id == card->root_node->node_id) fw_schedule_bm_work(card, 0); @@ -995,9 +1073,10 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event) */ device = node->data; if (atomic_xchg(&device->state, - FW_DEVICE_SHUTDOWN) == FW_DEVICE_RUNNING) { + FW_DEVICE_GONE) == FW_DEVICE_RUNNING) { PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown); - schedule_delayed_work(&device->work, 0); + schedule_delayed_work(&device->work, + list_empty(&card->link) ? 
0 : SHUTDOWN_DELAY); } break; } diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h index df51732608d9..8ef6ec2ca21c 100644 --- a/drivers/firewire/fw-device.h +++ b/drivers/firewire/fw-device.h @@ -28,6 +28,7 @@ enum fw_device_state { FW_DEVICE_INITIALIZING, FW_DEVICE_RUNNING, + FW_DEVICE_GONE, FW_DEVICE_SHUTDOWN, }; diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c index ab9c01e462ef..6d19828a93a5 100644 --- a/drivers/firewire/fw-ohci.c +++ b/drivers/firewire/fw-ohci.c @@ -226,7 +226,7 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card) #define CONTEXT_DEAD 0x0800 #define CONTEXT_ACTIVE 0x0400 -#define OHCI1394_MAX_AT_REQ_RETRIES 0x2 +#define OHCI1394_MAX_AT_REQ_RETRIES 0xf #define OHCI1394_MAX_AT_RESP_RETRIES 0x2 #define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8 @@ -896,11 +896,11 @@ static void context_stop(struct context *ctx) for (i = 0; i < 10; i++) { reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs)); if ((reg & CONTEXT_ACTIVE) == 0) - break; + return; - fw_notify("context_stop: still active (0x%08x)\n", reg); mdelay(1); } + fw_error("Error: DMA context still active (0x%08x)\n", reg); } struct driver_data { diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c index e88d5067448c..c71c4419d9e8 100644 --- a/drivers/firewire/fw-sbp2.c +++ b/drivers/firewire/fw-sbp2.c @@ -168,6 +168,7 @@ struct sbp2_target { int address_high; unsigned int workarounds; unsigned int mgt_orb_timeout; + unsigned int max_payload; int dont_block; /* counter for each logical unit */ int blocked; /* ditto */ @@ -310,14 +311,16 @@ struct sbp2_command_orb { dma_addr_t page_table_bus; }; +#define SBP2_ROM_VALUE_WILDCARD ~0 /* match all */ +#define SBP2_ROM_VALUE_MISSING 0xff000000 /* not present in the unit dir. */ + /* * List of devices with known bugs. * * The firmware_revision field, masked with 0xffff00, is the best * indicator for the type of bridge chip of a device. It yields a few * false positives but this did not break correctly behaving devices - * so far. We use ~0 as a wildcard, since the 24 bit values we get - * from the config rom can never match that. + * so far. */ static const struct { u32 firmware_revision; @@ -339,33 +342,35 @@ static const struct { }, /* Initio bridges, actually only needed for some older ones */ { .firmware_revision = 0x000200, - .model = ~0, + .model = SBP2_ROM_VALUE_WILDCARD, .workarounds = SBP2_WORKAROUND_INQUIRY_36, }, /* PL-3507 bridge with Prolific firmware */ { .firmware_revision = 0x012800, - .model = ~0, + .model = SBP2_ROM_VALUE_WILDCARD, .workarounds = SBP2_WORKAROUND_POWER_CONDITION, }, /* Symbios bridge */ { .firmware_revision = 0xa0b800, - .model = ~0, + .model = SBP2_ROM_VALUE_WILDCARD, .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS, }, /* Datafab MD2-FW2 with Symbios/LSILogic SYM13FW500 bridge */ { .firmware_revision = 0x002600, - .model = ~0, + .model = SBP2_ROM_VALUE_WILDCARD, .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS, }, - /* - * There are iPods (2nd gen, 3rd gen) with model_id == 0, but - * these iPods do not feature the read_capacity bug according - * to one report. Read_capacity behaviour as well as model_id - * could change due to Apple-supplied firmware updates though. + * iPod 2nd generation: needs 128k max transfer size workaround + * iPod 3rd generation: needs fix capacity workaround */ - - /* iPod 4th generation. 
*/ { + { + .firmware_revision = 0x0a2700, + .model = 0x000000, + .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS | + SBP2_WORKAROUND_FIX_CAPACITY, + }, + /* iPod 4th generation */ { .firmware_revision = 0x0a2700, .model = 0x000021, .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, @@ -1092,7 +1097,7 @@ static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model, continue; if (sbp2_workarounds_table[i].model != model && - sbp2_workarounds_table[i].model != ~0) + sbp2_workarounds_table[i].model != SBP2_ROM_VALUE_WILDCARD) continue; w |= sbp2_workarounds_table[i].workarounds; @@ -1142,20 +1147,28 @@ static int sbp2_probe(struct device *dev) fw_device_get(device); fw_unit_get(unit); - /* Initialize to values that won't match anything in our table. */ - firmware_revision = 0xff000000; - model = 0xff000000; - /* implicit directory ID */ tgt->directory_id = ((unit->directory - device->config_rom) * 4 + CSR_CONFIG_ROM) & 0xffffff; + firmware_revision = SBP2_ROM_VALUE_MISSING; + model = SBP2_ROM_VALUE_MISSING; + if (sbp2_scan_unit_dir(tgt, unit->directory, &model, &firmware_revision) < 0) goto fail_tgt_put; sbp2_init_workarounds(tgt, model, firmware_revision); + /* + * At S100 we can do 512 bytes per packet, at S200 1024 bytes, + * and so on up to 4096 bytes. The SBP-2 max_payload field + * specifies the max payload size as 2 ^ (max_payload + 2), so + * if we set this to max_speed + 7, we get the right value. + */ + tgt->max_payload = min(device->max_speed + 7, 10U); + tgt->max_payload = min(tgt->max_payload, device->card->max_receive - 1); + /* Do the login in a workqueue so we can easily reschedule retries. */ list_for_each_entry(lu, &tgt->lu_list, link) sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); @@ -1273,6 +1286,19 @@ static struct fw_driver sbp2_driver = { .id_table = sbp2_id_table, }; +static void sbp2_unmap_scatterlist(struct device *card_device, + struct sbp2_command_orb *orb) +{ + if (scsi_sg_count(orb->cmd)) + dma_unmap_sg(card_device, scsi_sglist(orb->cmd), + scsi_sg_count(orb->cmd), + orb->cmd->sc_data_direction); + + if (orb->request.misc & cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT)) + dma_unmap_single(card_device, orb->page_table_bus, + sizeof(orb->page_table), DMA_TO_DEVICE); +} + static unsigned int sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data) { @@ -1352,15 +1378,7 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) dma_unmap_single(device->card->device, orb->base.request_bus, sizeof(orb->request), DMA_TO_DEVICE); - - if (scsi_sg_count(orb->cmd) > 0) - dma_unmap_sg(device->card->device, scsi_sglist(orb->cmd), - scsi_sg_count(orb->cmd), - orb->cmd->sc_data_direction); - - if (orb->page_table_bus != 0) - dma_unmap_single(device->card->device, orb->page_table_bus, - sizeof(orb->page_table), DMA_TO_DEVICE); + sbp2_unmap_scatterlist(device->card->device, orb); orb->cmd->result = result; orb->done(orb->cmd); @@ -1434,7 +1452,6 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done) struct sbp2_logical_unit *lu = cmd->device->hostdata; struct fw_device *device = fw_device(lu->tgt->unit->device.parent); struct sbp2_command_orb *orb; - unsigned int max_payload; int generation, retval = SCSI_MLQUEUE_HOST_BUSY; /* @@ -1462,17 +1479,9 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done) orb->done = done; orb->cmd = cmd; - orb->request.next.high = cpu_to_be32(SBP2_ORB_NULL); - /* - * At speed 100 we can do 512 bytes per packet, at speed 200, - * 1024 bytes per packet etc. 
The SBP-2 max_payload field - * specifies the max payload size as 2 ^ (max_payload + 2), so - * if we set this to max_speed + 7, we get the right value. - */ - max_payload = min(device->max_speed + 7, - device->card->max_receive - 1); + orb->request.next.high = cpu_to_be32(SBP2_ORB_NULL); orb->request.misc = cpu_to_be32( - COMMAND_ORB_MAX_PAYLOAD(max_payload) | + COMMAND_ORB_MAX_PAYLOAD(lu->tgt->max_payload) | COMMAND_ORB_SPEED(device->max_speed) | COMMAND_ORB_NOTIFY); @@ -1491,8 +1500,10 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done) orb->base.request_bus = dma_map_single(device->card->device, &orb->request, sizeof(orb->request), DMA_TO_DEVICE); - if (dma_mapping_error(device->card->device, orb->base.request_bus)) + if (dma_mapping_error(device->card->device, orb->base.request_bus)) { + sbp2_unmap_scatterlist(device->card->device, orb); goto out; + } sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, generation, lu->command_block_agent_address + SBP2_ORB_POINTER); diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c index c9be6e6948c4..8dd6703b55cd 100644 --- a/drivers/firewire/fw-topology.c +++ b/drivers/firewire/fw-topology.c @@ -518,6 +518,18 @@ fw_core_handle_bus_reset(struct fw_card *card, struct fw_node *local_node; unsigned long flags; + /* + * If the selfID buffer is not the immediate successor of the + * previously processed one, we cannot reliably compare the + * old and new topologies. + */ + if (!is_next_generation(generation, card->generation) && + card->local_node != NULL) { + fw_notify("skipped bus generations, destroying all nodes\n"); + fw_destroy_nodes(card); + card->bm_retries = 0; + } + spin_lock_irqsave(&card->lock, flags); card->node_id = node_id; diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h index c9ab12a15f6e..1d78e9cc5940 100644 --- a/drivers/firewire/fw-transaction.h +++ b/drivers/firewire/fw-transaction.h @@ -276,6 +276,15 @@ static inline void fw_card_put(struct fw_card *card) extern void fw_schedule_bm_work(struct fw_card *card, unsigned long delay); /* + * Check whether new_generation is the immediate successor of old_generation. + * Take counter roll-over at 255 (as per to OHCI) into account. + */ +static inline bool is_next_generation(int new_generation, int old_generation) +{ + return (new_generation & 0xff) == ((old_generation + 1) & 0xff); +} + +/* * The iso packet format allows for an immediate header/payload part * stored in 'header' immediately after the packet info plus an * indirect payload part that is pointer to by the 'payload' field. 
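[Editor's aside -- not part of the patch above] The fw-sbp2.c hunk above caches tgt->max_payload using the rule its comment spells out: the SBP-2 field encodes the payload size as 2 ^ (max_payload + 2), so max_speed + 7, clamped to 10 and to the card's max_receive, gives 512 bytes at S100 up to 4096 bytes at S800 and above. The standalone check below merely tabulates that arithmetic as a worked example; the speed codes (S100 = 0, S200 = 1, ...) are assumed to mirror the IEEE1394_SPEED_* numbering seen elsewhere in this diff, and the min() against device->card->max_receive - 1 is deliberately left out since it depends on the controller.

/* editor's illustrative sketch of the 2 ^ (max_payload + 2) encoding */
#include <stdio.h>

int main(void)
{
	static const char *name[] = { "S100", "S200", "S400", "S800", "S1600" };
	unsigned int speed;

	for (speed = 0; speed < 5; speed++) {
		unsigned int max_payload = speed + 7;

		if (max_payload > 10)	/* same cap as min(..., 10U) in the patch */
			max_payload = 10;
		printf("%-5s  max_payload=%2u  ->  %4u bytes/packet\n",
		       name[speed], max_payload, 1u << (max_payload + 2));
	}
	return 0;
}

This prints 512 for S100, 1024 for S200, 2048 for S400, and 4096 for S800 and faster, matching the comment's figures.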
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 35e7aea4222c..42fb2fd24c0c 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -789,6 +789,7 @@ int gpio_request(unsigned gpio, const char *label) } else { status = -EBUSY; module_put(chip->owner); + goto done; } if (chip->request) { diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 6b1148fc2cbe..b36a5214d8df 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -311,7 +311,7 @@ static int intel_lvds_get_modes(struct drm_connector *connector) if (dev_priv->panel_fixed_mode != NULL) { struct drm_display_mode *mode; - mutex_unlock(&dev->mode_config.mutex); + mutex_lock(&dev->mode_config.mutex); mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); drm_mode_probed_add(connector, mode); mutex_unlock(&dev->mode_config.mutex); diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 5d7640e49dc5..6cad69ed21c5 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1218,6 +1218,7 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask) } EXPORT_SYMBOL_GPL(hid_connect); +/* a list of devices for which there is a specialized driver on HID bus */ static const struct hid_device_id hid_blacklist[] = { { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) }, { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) }, @@ -1476,6 +1477,7 @@ static struct bus_type hid_bus_type = { .uevent = hid_uevent, }; +/* a list of devices that shouldn't be handled by HID core at all */ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_FLAIR) }, { HID_USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_302) }, @@ -1606,6 +1608,8 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SOUNDGRAPH, USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD) }, { HID_USB_DEVICE(USB_VENDOR_ID_SOUNDGRAPH, USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD2) }, { HID_USB_DEVICE(USB_VENDOR_ID_SOUNDGRAPH, USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD3) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SOUNDGRAPH, USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD4) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SOUNDGRAPH, USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD5) }, { HID_USB_DEVICE(USB_VENDOR_ID_TENX, USB_DEVICE_ID_TENX_IBUDDY1) }, { HID_USB_DEVICE(USB_VENDOR_ID_TENX, USB_DEVICE_ID_TENX_IBUDDY2) }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) }, diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index acc1abc834a4..e899f510ebeb 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -362,6 +362,8 @@ #define USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD 0x0038 #define USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD2 0x0036 #define USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD3 0x0034 +#define USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD4 0x0044 +#define USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD5 0x0045 #define USB_VENDOR_ID_SUN 0x0430 #define USB_DEVICE_ID_RARITAN_KVM_DONGLE 0xcdab diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c index d718b1607d0f..25b10dcad90d 100644 --- a/drivers/hid/hid-microsoft.c +++ b/drivers/hid/hid-microsoft.c @@ -30,7 +30,7 @@ #define MS_NOGET 0x10 /* - * Microsoft Wireless Desktop Receiver (Model 1028) has several + * Microsoft Wireless Desktop Receiver (Model 1028) has * 'Usage Min/Max' where it ought to have 'Physical Min/Max' */ static void ms_report_fixup(struct hid_device *hdev, __u8 *rdesc, @@ -38,17 +38,12 @@ static void ms_report_fixup(struct hid_device *hdev, __u8 *rdesc, { 
unsigned long quirks = (unsigned long)hid_get_drvdata(hdev); - if ((quirks & MS_RDESC) && rsize == 571 && rdesc[284] == 0x19 && - rdesc[286] == 0x2a && rdesc[304] == 0x19 && - rdesc[306] == 0x29 && rdesc[352] == 0x1a && - rdesc[355] == 0x2a && rdesc[557] == 0x19 && + if ((quirks & MS_RDESC) && rsize == 571 && rdesc[557] == 0x19 && rdesc[559] == 0x29) { dev_info(&hdev->dev, "fixing up Microsoft Wireless Receiver " "Model 1028 report descriptor\n"); - rdesc[284] = rdesc[304] = rdesc[557] = 0x35; - rdesc[352] = 0x36; - rdesc[286] = rdesc[355] = 0x46; - rdesc[306] = rdesc[559] = 0x45; + rdesc[557] = 0x35; + rdesc[559] = 0x45; } } diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index d73eea382ab3..4940e4d70c2d 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -656,7 +656,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case HIDIOCGSTRING: mutex_lock(&hiddev->existancelock); - if (!hiddev->exist) + if (hiddev->exist) r = hiddev_ioctl_string(hiddev, cmd, user_arg); else r = -ENODEV; diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c index e30186236588..678e34b01e52 100644 --- a/drivers/hwmon/applesmc.c +++ b/drivers/hwmon/applesmc.c @@ -83,7 +83,7 @@ /* * Temperature sensors keys (sp78 - 2 bytes). */ -static const char* temperature_sensors_sets[][36] = { +static const char *temperature_sensors_sets[][41] = { /* Set 0: Macbook Pro */ { "TA0P", "TB0T", "TC0D", "TC0P", "TG0H", "TG0P", "TG0T", "Th0H", "Th1H", "Tm0P", "Ts0P", "Ts1P", NULL }, @@ -135,6 +135,13 @@ static const char* temperature_sensors_sets[][36] = { { "TB0T", "TB1S", "TB1T", "TB2S", "TB2T", "TC0D", "TN0D", "TTF0", "TV0P", "TVFP", "TW0P", "Th0P", "Tp0P", "Tp1P", "TpFP", "Ts0P", "Ts0S", NULL }, +/* Set 16: Mac Pro 3,1 (2 x Quad-Core) */ + { "TA0P", "TCAG", "TCAH", "TCBG", "TCBH", "TC0C", "TC0D", "TC0P", + "TC1C", "TC1D", "TC2C", "TC2D", "TC3C", "TC3D", "TH0P", "TH1P", + "TH2P", "TH3P", "TMAP", "TMAS", "TMBS", "TM0P", "TM0S", "TM1P", + "TM1S", "TM2P", "TM2S", "TM3S", "TM8P", "TM8S", "TM9P", "TM9S", + "TN0C", "TN0D", "TN0H", "TS0C", "Tp0C", "Tp1C", "Tv0S", "Tv1S", + NULL }, }; /* List of keys used to read/write fan speeds */ @@ -1153,6 +1160,16 @@ static SENSOR_DEVICE_ATTR(temp34_input, S_IRUGO, applesmc_show_temperature, NULL, 33); static SENSOR_DEVICE_ATTR(temp35_input, S_IRUGO, applesmc_show_temperature, NULL, 34); +static SENSOR_DEVICE_ATTR(temp36_input, S_IRUGO, + applesmc_show_temperature, NULL, 35); +static SENSOR_DEVICE_ATTR(temp37_input, S_IRUGO, + applesmc_show_temperature, NULL, 36); +static SENSOR_DEVICE_ATTR(temp38_input, S_IRUGO, + applesmc_show_temperature, NULL, 37); +static SENSOR_DEVICE_ATTR(temp39_input, S_IRUGO, + applesmc_show_temperature, NULL, 38); +static SENSOR_DEVICE_ATTR(temp40_input, S_IRUGO, + applesmc_show_temperature, NULL, 39); static struct attribute *temperature_attributes[] = { &sensor_dev_attr_temp1_input.dev_attr.attr, @@ -1190,6 +1207,11 @@ static struct attribute *temperature_attributes[] = { &sensor_dev_attr_temp33_input.dev_attr.attr, &sensor_dev_attr_temp34_input.dev_attr.attr, &sensor_dev_attr_temp35_input.dev_attr.attr, + &sensor_dev_attr_temp36_input.dev_attr.attr, + &sensor_dev_attr_temp37_input.dev_attr.attr, + &sensor_dev_attr_temp38_input.dev_attr.attr, + &sensor_dev_attr_temp39_input.dev_attr.attr, + &sensor_dev_attr_temp40_input.dev_attr.attr, NULL }; @@ -1312,6 +1334,8 @@ static __initdata struct dmi_match_data applesmc_dmi_data[] = { { .accelerometer = 0, .light = 0, .temperature_set = 
14 }, /* MacBook Air 2,1: accelerometer, backlight and temperature set 15 */ { .accelerometer = 1, .light = 1, .temperature_set = 15 }, +/* MacPro3,1: temperature set 16 */ + { .accelerometer = 0, .light = 0, .temperature_set = 16 }, }; /* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1". @@ -1369,6 +1393,10 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = { DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), DMI_MATCH(DMI_PRODUCT_NAME,"MacPro2") }, &applesmc_dmi_data[4]}, + { applesmc_dmi_match, "Apple MacPro3", { + DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), + DMI_MATCH(DMI_PRODUCT_NAME, "MacPro3") }, + &applesmc_dmi_data[16]}, { applesmc_dmi_match, "Apple MacPro", { DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "MacPro") }, diff --git a/drivers/ieee1394/ieee1394.h b/drivers/ieee1394/ieee1394.h index e0ae0d3d747f..af320e2c5079 100644 --- a/drivers/ieee1394/ieee1394.h +++ b/drivers/ieee1394/ieee1394.h @@ -54,9 +54,7 @@ #define IEEE1394_SPEED_800 0x03 #define IEEE1394_SPEED_1600 0x04 #define IEEE1394_SPEED_3200 0x05 - -/* The current highest tested speed supported by the subsystem */ -#define IEEE1394_SPEED_MAX IEEE1394_SPEED_800 +#define IEEE1394_SPEED_MAX IEEE1394_SPEED_3200 /* Maps speed values above to a string representation */ extern const char *hpsb_speedto_str[]; diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c index dcdb71a7718d..2beb8d94f7bd 100644 --- a/drivers/ieee1394/ieee1394_core.c +++ b/drivers/ieee1394/ieee1394_core.c @@ -338,6 +338,7 @@ static void build_speed_map(struct hpsb_host *host, int nodecount) u8 cldcnt[nodecount]; u8 *map = host->speed_map; u8 *speedcap = host->speed; + u8 local_link_speed = host->csr.lnk_spd; struct selfid *sid; struct ext_selfid *esid; int i, j, n; @@ -373,8 +374,8 @@ static void build_speed_map(struct hpsb_host *host, int nodecount) if (sid->port2 == SELFID_PORT_CHILD) cldcnt[n]++; speedcap[n] = sid->speed; - if (speedcap[n] > host->csr.lnk_spd) - speedcap[n] = host->csr.lnk_spd; + if (speedcap[n] > local_link_speed) + speedcap[n] = local_link_speed; n--; } } @@ -407,12 +408,11 @@ static void build_speed_map(struct hpsb_host *host, int nodecount) } } -#if SELFID_SPEED_UNKNOWN != IEEE1394_SPEED_MAX - /* assume maximum speed for 1394b PHYs, nodemgr will correct it */ - for (n = 0; n < nodecount; n++) - if (speedcap[n] == SELFID_SPEED_UNKNOWN) - speedcap[n] = IEEE1394_SPEED_MAX; -#endif + /* assume a maximum speed for 1394b PHYs, nodemgr will correct it */ + if (local_link_speed > SELFID_SPEED_UNKNOWN) + for (i = 0; i < nodecount; i++) + if (speedcap[i] == SELFID_SPEED_UNKNOWN) + speedcap[i] = local_link_speed; } diff --git a/drivers/ieee1394/ohci1394.h b/drivers/ieee1394/ohci1394.h index 4320bf010495..7fb8ab9780ae 100644 --- a/drivers/ieee1394/ohci1394.h +++ b/drivers/ieee1394/ohci1394.h @@ -26,7 +26,7 @@ #define OHCI1394_DRIVER_NAME "ohci1394" -#define OHCI1394_MAX_AT_REQ_RETRIES 0x2 +#define OHCI1394_MAX_AT_REQ_RETRIES 0xf #define OHCI1394_MAX_AT_RESP_RETRIES 0x2 #define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8 #define OHCI1394_MAX_SELF_ID_ERRORS 16 diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index ab1034ccb7fb..f3fd8657ce4b 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c @@ -115,8 +115,8 @@ */ static int sbp2_max_speed = IEEE1394_SPEED_MAX; module_param_named(max_speed, sbp2_max_speed, int, 0644); -MODULE_PARM_DESC(max_speed, "Force max speed " - "(3 = 800Mb/s, 2 = 400Mb/s, 1 = 200Mb/s, 0 = 100Mb/s)"); +MODULE_PARM_DESC(max_speed, "Limit data transfer 
speed (5 <= 3200, " + "4 <= 1600, 3 <= 800, 2 <= 400, 1 <= 200, 0 = 100 Mb/s)"); /* * Set serialize_io to 0 or N to use dynamically appended lists of command ORBs. @@ -256,7 +256,7 @@ static int sbp2_set_busy_timeout(struct sbp2_lu *); static int sbp2_max_speed_and_size(struct sbp2_lu *); -static const u8 sbp2_speedto_max_payload[] = { 0x7, 0x8, 0x9, 0xA, 0xB, 0xC }; +static const u8 sbp2_speedto_max_payload[] = { 0x7, 0x8, 0x9, 0xa, 0xa, 0xa }; static DEFINE_RWLOCK(sbp2_hi_logical_units_lock); @@ -347,8 +347,8 @@ static struct scsi_host_template sbp2_shost_template = { .sdev_attrs = sbp2_sysfs_sdev_attrs, }; -/* for match-all entries in sbp2_workarounds_table */ -#define SBP2_ROM_VALUE_WILDCARD 0x1000000 +#define SBP2_ROM_VALUE_WILDCARD ~0 /* match all */ +#define SBP2_ROM_VALUE_MISSING 0xff000000 /* not present in the unit dir. */ /* * List of devices with known bugs. @@ -359,60 +359,70 @@ static struct scsi_host_template sbp2_shost_template = { */ static const struct { u32 firmware_revision; - u32 model_id; + u32 model; unsigned workarounds; } sbp2_workarounds_table[] = { /* DViCO Momobay CX-1 with TSB42AA9 bridge */ { .firmware_revision = 0x002800, - .model_id = 0x001010, + .model = 0x001010, .workarounds = SBP2_WORKAROUND_INQUIRY_36 | SBP2_WORKAROUND_MODE_SENSE_8 | SBP2_WORKAROUND_POWER_CONDITION, }, /* DViCO Momobay FX-3A with TSB42AA9A bridge */ { .firmware_revision = 0x002800, - .model_id = 0x000000, + .model = 0x000000, .workarounds = SBP2_WORKAROUND_DELAY_INQUIRY | SBP2_WORKAROUND_POWER_CONDITION, }, /* Initio bridges, actually only needed for some older ones */ { .firmware_revision = 0x000200, - .model_id = SBP2_ROM_VALUE_WILDCARD, + .model = SBP2_ROM_VALUE_WILDCARD, .workarounds = SBP2_WORKAROUND_INQUIRY_36, }, /* PL-3507 bridge with Prolific firmware */ { .firmware_revision = 0x012800, - .model_id = SBP2_ROM_VALUE_WILDCARD, + .model = SBP2_ROM_VALUE_WILDCARD, .workarounds = SBP2_WORKAROUND_POWER_CONDITION, }, /* Symbios bridge */ { .firmware_revision = 0xa0b800, - .model_id = SBP2_ROM_VALUE_WILDCARD, + .model = SBP2_ROM_VALUE_WILDCARD, .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS, }, /* Datafab MD2-FW2 with Symbios/LSILogic SYM13FW500 bridge */ { .firmware_revision = 0x002600, - .model_id = SBP2_ROM_VALUE_WILDCARD, + .model = SBP2_ROM_VALUE_WILDCARD, .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS, }, + /* + * iPod 2nd generation: needs 128k max transfer size workaround + * iPod 3rd generation: needs fix capacity workaround + */ + { + .firmware_revision = 0x0a2700, + .model = 0x000000, + .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS | + SBP2_WORKAROUND_FIX_CAPACITY, + }, /* iPod 4th generation */ { .firmware_revision = 0x0a2700, - .model_id = 0x000021, + .model = 0x000021, .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, }, /* iPod mini */ { .firmware_revision = 0x0a2700, - .model_id = 0x000022, + .model = 0x000022, .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, }, /* iPod mini */ { .firmware_revision = 0x0a2700, - .model_id = 0x000023, + .model = 0x000023, .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, }, /* iPod Photo */ { .firmware_revision = 0x0a2700, - .model_id = 0x00007e, + .model = 0x00007e, .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, } }; @@ -1341,13 +1351,15 @@ static void sbp2_parse_unit_directory(struct sbp2_lu *lu, struct csr1212_keyval *kv; struct csr1212_dentry *dentry; u64 management_agent_addr; - u32 unit_characteristics, firmware_revision; + u32 unit_characteristics, firmware_revision, model; unsigned workarounds; int i; management_agent_addr = 0; 
unit_characteristics = 0; - firmware_revision = 0; + firmware_revision = SBP2_ROM_VALUE_MISSING; + model = ud->flags & UNIT_DIRECTORY_MODEL_ID ? + ud->model_id : SBP2_ROM_VALUE_MISSING; csr1212_for_each_dir_entry(ud->ne->csr, kv, ud->ud_kv, dentry) { switch (kv->key.id) { @@ -1388,9 +1400,9 @@ static void sbp2_parse_unit_directory(struct sbp2_lu *lu, sbp2_workarounds_table[i].firmware_revision != (firmware_revision & 0xffff00)) continue; - if (sbp2_workarounds_table[i].model_id != + if (sbp2_workarounds_table[i].model != SBP2_ROM_VALUE_WILDCARD && - sbp2_workarounds_table[i].model_id != ud->model_id) + sbp2_workarounds_table[i].model != model) continue; workarounds |= sbp2_workarounds_table[i].workarounds; break; @@ -1403,7 +1415,7 @@ static void sbp2_parse_unit_directory(struct sbp2_lu *lu, NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid), workarounds, firmware_revision, ud->vendor_id ? ud->vendor_id : ud->ne->vendor_id, - ud->model_id); + model); /* We would need one SCSI host template for each target to adjust * max_sectors on the fly, therefore warn only. */ diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c index 90663e01a56e..60156dfdc608 100644 --- a/drivers/lguest/core.c +++ b/drivers/lguest/core.c @@ -224,7 +224,7 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user) break; /* If the Guest asked to be stopped, we sleep. The Guest's - * clock timer or LHCALL_BREAK from the Waker will wake us. */ + * clock timer or LHREQ_BREAK from the Waker will wake us. */ if (cpu->halted) { set_current_state(TASK_INTERRUPTIBLE); schedule(); diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c index 34bc017b8b3c..b8ee103eed5f 100644 --- a/drivers/lguest/lguest_user.c +++ b/drivers/lguest/lguest_user.c @@ -307,9 +307,8 @@ static int close(struct inode *inode, struct file *file) * kmalloc()ed string, either of which is ok to hand to kfree(). */ if (!IS_ERR(lg->dead)) kfree(lg->dead); - /* We clear the entire structure, which also marks it as free for the - * next user. */ - memset(lg, 0, sizeof(*lg)); + /* Free the memory allocated to the lguest_struct */ + kfree(lg); /* Release lock and exit. 
*/ mutex_unlock(&lguest_lock); diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c index 05e298289238..10c421b73eaf 100644 --- a/drivers/misc/hpilo.c +++ b/drivers/misc/hpilo.c @@ -758,7 +758,7 @@ static void __exit ilo_exit(void) class_destroy(ilo_class); } -MODULE_VERSION("0.05"); +MODULE_VERSION("0.06"); MODULE_ALIAS(ILO_NAME); MODULE_DESCRIPTION(ILO_NAME); MODULE_AUTHOR("David Altobelli <david.altobelli@hp.com>"); diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c index 9cd2ebe2a3b6..45fd653dbe31 100644 --- a/drivers/misc/sgi-xp/xpc_channel.c +++ b/drivers/misc/sgi-xp/xpc_channel.c @@ -49,9 +49,6 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags) if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING)) return; - - DBUG_ON(ch->local_msgqueue == NULL); - DBUG_ON(ch->remote_msgqueue == NULL); } if (!(ch->flags & XPC_C_OPENREPLY)) { diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c index 82fb9958f22f..2e975762c32b 100644 --- a/drivers/misc/sgi-xp/xpc_sn2.c +++ b/drivers/misc/sgi-xp/xpc_sn2.c @@ -1106,8 +1106,6 @@ xpc_process_activate_IRQ_rcvd_sn2(void) int n_IRQs_expected; int n_IRQs_detected; - DBUG_ON(xpc_activate_IRQ_rcvd == 0); - spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); n_IRQs_expected = xpc_activate_IRQ_rcvd; xpc_activate_IRQ_rcvd = 0; @@ -1726,6 +1724,7 @@ xpc_clear_local_msgqueue_flags_sn2(struct xpc_channel *ch) msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->local_msgqueue + (get % ch->local_nentries) * ch->entry_size); + DBUG_ON(!(msg->flags & XPC_M_SN2_READY)); msg->flags = 0; } while (++get < ch_sn2->remote_GP.get); } @@ -1740,11 +1739,18 @@ xpc_clear_remote_msgqueue_flags_sn2(struct xpc_channel *ch) struct xpc_msg_sn2 *msg; s64 put; - put = ch_sn2->w_remote_GP.put; + /* flags are zeroed when the buffer is allocated */ + if (ch_sn2->remote_GP.put < ch->remote_nentries) + return; + + put = max(ch_sn2->w_remote_GP.put, ch->remote_nentries); do { msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue + (put % ch->remote_nentries) * ch->entry_size); + DBUG_ON(!(msg->flags & XPC_M_SN2_READY)); + DBUG_ON(!(msg->flags & XPC_M_SN2_DONE)); + DBUG_ON(msg->number != put - ch->remote_nentries); msg->flags = 0; } while (++put < ch_sn2->remote_GP.put); } @@ -1836,6 +1842,7 @@ xpc_process_msg_chctl_flags_sn2(struct xpc_partition *part, int ch_number) */ xpc_clear_remote_msgqueue_flags_sn2(ch); + smp_wmb(); /* ensure flags have been cleared before bte_copy */ ch_sn2->w_remote_GP.put = ch_sn2->remote_GP.put; dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, " @@ -1934,7 +1941,7 @@ xpc_get_deliverable_payload_sn2(struct xpc_channel *ch) break; get = ch_sn2->w_local_GP.get; - rmb(); /* guarantee that .get loads before .put */ + smp_rmb(); /* guarantee that .get loads before .put */ if (get == ch_sn2->w_remote_GP.put) break; @@ -1956,11 +1963,13 @@ xpc_get_deliverable_payload_sn2(struct xpc_channel *ch) msg = xpc_pull_remote_msg_sn2(ch, get); - DBUG_ON(msg != NULL && msg->number != get); - DBUG_ON(msg != NULL && (msg->flags & XPC_M_SN2_DONE)); - DBUG_ON(msg != NULL && !(msg->flags & XPC_M_SN2_READY)); + if (msg != NULL) { + DBUG_ON(msg->number != get); + DBUG_ON(msg->flags & XPC_M_SN2_DONE); + DBUG_ON(!(msg->flags & XPC_M_SN2_READY)); - payload = &msg->payload; + payload = &msg->payload; + } break; } @@ -2053,7 +2062,7 @@ xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags, while (1) { put = ch_sn2->w_local_GP.put; - rmb(); /* guarantee that .put loads before .get */ + smp_rmb(); /* 
guarantee that .put loads before .get */ if (put - ch_sn2->w_remote_GP.get < ch->local_nentries) { /* There are available message entries. We need to try @@ -2186,7 +2195,7 @@ xpc_send_payload_sn2(struct xpc_channel *ch, u32 flags, void *payload, * The preceding store of msg->flags must occur before the following * load of local_GP->put. */ - mb(); + smp_mb(); /* see if the message is next in line to be sent, if so send it */ @@ -2277,8 +2286,9 @@ xpc_received_payload_sn2(struct xpc_channel *ch, void *payload) dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n", (void *)msg, msg_number, ch->partid, ch->number); - DBUG_ON((((u64)msg - (u64)ch->remote_msgqueue) / ch->entry_size) != + DBUG_ON((((u64)msg - (u64)ch->sn.sn2.remote_msgqueue) / ch->entry_size) != msg_number % ch->remote_nentries); + DBUG_ON(!(msg->flags & XPC_M_SN2_READY)); DBUG_ON(msg->flags & XPC_M_SN2_DONE); msg->flags |= XPC_M_SN2_DONE; @@ -2287,7 +2297,7 @@ xpc_received_payload_sn2(struct xpc_channel *ch, void *payload) * The preceding store of msg->flags must occur before the following * load of local_GP->get. */ - mb(); + smp_mb(); /* * See if this message is next in line to be acknowledged as having diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c index 91a55b1b1037..f17f7d40ea2c 100644 --- a/drivers/misc/sgi-xp/xpc_uv.c +++ b/drivers/misc/sgi-xp/xpc_uv.c @@ -1423,7 +1423,7 @@ xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload, atomic_inc(&ch->n_to_notify); msg_slot->key = key; - wmb(); /* a non-NULL func must hit memory after the key */ + smp_wmb(); /* a non-NULL func must hit memory after the key */ msg_slot->func = func; if (ch->flags & XPC_C_DISCONNECTING) { diff --git a/drivers/mtd/ubi/Kconfig.debug b/drivers/mtd/ubi/Kconfig.debug index 1e2ee22edeff..2246f154e2f7 100644 --- a/drivers/mtd/ubi/Kconfig.debug +++ b/drivers/mtd/ubi/Kconfig.debug @@ -33,16 +33,6 @@ config MTD_UBI_DEBUG_DISABLE_BGT This option switches the background thread off by default. The thread may be also be enabled/disabled via UBI sysfs. -config MTD_UBI_DEBUG_USERSPACE_IO - bool "Direct user-space write/erase support" - default n - depends on MTD_UBI_DEBUG - help - By default, users cannot directly write and erase individual - eraseblocks of dynamic volumes, and have to use update operation - instead. This option enables this capability - it is very useful for - debugging and testing. - config MTD_UBI_DEBUG_EMULATE_BITFLIPS bool "Emulate flash bit-flips" depends on MTD_UBI_DEBUG diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index 9082768cc6c3..4048db83aef6 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -263,8 +263,12 @@ static ssize_t dev_attribute_show(struct device *dev, return ret; } -/* Fake "release" method for UBI devices */ -static void dev_release(struct device *dev) { } +static void dev_release(struct device *dev) +{ + struct ubi_device *ubi = container_of(dev, struct ubi_device, dev); + + kfree(ubi); +} /** * ubi_sysfs_init - initialize sysfs for an UBI device. 
@@ -380,7 +384,7 @@ static void free_user_volumes(struct ubi_device *ubi) */ static int uif_init(struct ubi_device *ubi) { - int i, err, do_free = 0; + int i, err; dev_t dev; sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num); @@ -427,13 +431,10 @@ static int uif_init(struct ubi_device *ubi) out_volumes: kill_volumes(ubi); - do_free = 0; out_sysfs: ubi_sysfs_close(ubi); cdev_del(&ubi->cdev); out_unreg: - if (do_free) - free_user_volumes(ubi); unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1); ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err); return err; @@ -947,6 +948,12 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway) if (ubi->bgt_thread) kthread_stop(ubi->bgt_thread); + /* + * Get a reference to the device in order to prevent 'dev_release()' + * from freeing @ubi object. + */ + get_device(&ubi->dev); + uif_close(ubi); ubi_wl_close(ubi); free_internal_volumes(ubi); @@ -958,7 +965,7 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway) vfree(ubi->dbg_peb_buf); #endif ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num); - kfree(ubi); + put_device(&ubi->dev); return 0; } diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c index 98cf31ed0814..e63c8fc3df3a 100644 --- a/drivers/mtd/ubi/cdev.c +++ b/drivers/mtd/ubi/cdev.c @@ -40,9 +40,9 @@ #include <linux/ioctl.h> #include <linux/capability.h> #include <linux/uaccess.h> -#include <linux/smp_lock.h> +#include <linux/compat.h> +#include <linux/math64.h> #include <mtd/ubi-user.h> -#include <asm/div64.h> #include "ubi.h" /** @@ -195,7 +195,6 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count, int err, lnum, off, len, tbuf_size; size_t count_save = count; void *tbuf; - uint64_t tmp; dbg_gen("read %zd bytes from offset %lld of volume %d", count, *offp, vol->vol_id); @@ -225,10 +224,7 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count, return -ENOMEM; len = count > tbuf_size ? tbuf_size : count; - - tmp = *offp; - off = do_div(tmp, vol->usable_leb_size); - lnum = tmp; + lnum = div_u64_rem(*offp, vol->usable_leb_size, &off); do { cond_resched(); @@ -263,12 +259,9 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count, return err ? err : count_save - count; } -#ifdef CONFIG_MTD_UBI_DEBUG_USERSPACE_IO - /* * This function allows to directly write to dynamic UBI volumes, without - * issuing the volume update operation. Available only as a debugging feature. - * Very useful for testing UBI. + * issuing the volume update operation. */ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf, size_t count, loff_t *offp) @@ -279,7 +272,9 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf, int lnum, off, len, tbuf_size, err = 0; size_t count_save = count; char *tbuf; - uint64_t tmp; + + if (!vol->direct_writes) + return -EPERM; dbg_gen("requested: write %zd bytes to offset %lld of volume %u", count, *offp, vol->vol_id); @@ -287,10 +282,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf, if (vol->vol_type == UBI_STATIC_VOLUME) return -EROFS; - tmp = *offp; - off = do_div(tmp, vol->usable_leb_size); - lnum = tmp; - + lnum = div_u64_rem(*offp, vol->usable_leb_size, &off); if (off & (ubi->min_io_size - 1)) { dbg_err("unaligned position"); return -EINVAL; @@ -347,10 +339,6 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf, return err ? 
err : count_save - count; } -#else -#define vol_cdev_direct_write(file, buf, count, offp) (-EPERM) -#endif /* CONFIG_MTD_UBI_DEBUG_USERSPACE_IO */ - static ssize_t vol_cdev_write(struct file *file, const char __user *buf, size_t count, loff_t *offp) { @@ -402,8 +390,8 @@ static ssize_t vol_cdev_write(struct file *file, const char __user *buf, return count; } -static int vol_cdev_ioctl(struct inode *inode, struct file *file, - unsigned int cmd, unsigned long arg) +static long vol_cdev_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) { int err = 0; struct ubi_volume_desc *desc = file->private_data; @@ -487,7 +475,6 @@ static int vol_cdev_ioctl(struct inode *inode, struct file *file, break; } -#ifdef CONFIG_MTD_UBI_DEBUG_USERSPACE_IO /* Logical eraseblock erasure command */ case UBI_IOCEBER: { @@ -518,13 +505,77 @@ static int vol_cdev_ioctl(struct inode *inode, struct file *file, err = ubi_wl_flush(ubi); break; } -#endif + + /* Logical eraseblock map command */ + case UBI_IOCEBMAP: + { + struct ubi_map_req req; + + err = copy_from_user(&req, argp, sizeof(struct ubi_map_req)); + if (err) { + err = -EFAULT; + break; + } + err = ubi_leb_map(desc, req.lnum, req.dtype); + break; + } + + /* Logical eraseblock un-map command */ + case UBI_IOCEBUNMAP: + { + int32_t lnum; + + err = get_user(lnum, (__user int32_t *)argp); + if (err) { + err = -EFAULT; + break; + } + err = ubi_leb_unmap(desc, lnum); + break; + } + + /* Check if logical eraseblock is mapped command */ + case UBI_IOCEBISMAP: + { + int32_t lnum; + + err = get_user(lnum, (__user int32_t *)argp); + if (err) { + err = -EFAULT; + break; + } + err = ubi_is_mapped(desc, lnum); + break; + } + + /* Set volume property command*/ + case UBI_IOCSETPROP: + { + struct ubi_set_prop_req req; + + err = copy_from_user(&req, argp, + sizeof(struct ubi_set_prop_req)); + if (err) { + err = -EFAULT; + break; + } + switch (req.property) { + case UBI_PROP_DIRECT_WRITE: + mutex_lock(&ubi->volumes_mutex); + desc->vol->direct_writes = !!req.value; + mutex_unlock(&ubi->volumes_mutex); + break; + default: + err = -EINVAL; + break; + } + break; + } default: err = -ENOTTY; break; } - return err; } @@ -762,8 +813,8 @@ out_free: return err; } -static int ubi_cdev_ioctl(struct inode *inode, struct file *file, - unsigned int cmd, unsigned long arg) +static long ubi_cdev_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) { int err = 0; struct ubi_device *ubi; @@ -773,7 +824,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file, if (!capable(CAP_SYS_RESOURCE)) return -EPERM; - ubi = ubi_get_by_major(imajor(inode)); + ubi = ubi_get_by_major(imajor(file->f_mapping->host)); if (!ubi) return -ENODEV; @@ -843,7 +894,6 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file, case UBI_IOCRSVOL: { int pebs; - uint64_t tmp; struct ubi_rsvol_req req; dbg_gen("re-size volume"); @@ -863,9 +913,8 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file, break; } - tmp = req.bytes; - pebs = !!do_div(tmp, desc->vol->usable_leb_size); - pebs += tmp; + pebs = div_u64(req.bytes + desc->vol->usable_leb_size - 1, + desc->vol->usable_leb_size); mutex_lock(&ubi->volumes_mutex); err = ubi_resize_volume(desc, pebs); @@ -909,8 +958,8 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file, return err; } -static int ctrl_cdev_ioctl(struct inode *inode, struct file *file, - unsigned int cmd, unsigned long arg) +static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) { int err = 0; void __user 
*argp = (void __user *)arg; @@ -986,26 +1035,59 @@ static int ctrl_cdev_ioctl(struct inode *inode, struct file *file, return err; } -/* UBI control character device operations */ -struct file_operations ubi_ctrl_cdev_operations = { - .ioctl = ctrl_cdev_ioctl, - .owner = THIS_MODULE, +#ifdef CONFIG_COMPAT +static long vol_cdev_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + unsigned long translated_arg = (unsigned long)compat_ptr(arg); + + return vol_cdev_ioctl(file, cmd, translated_arg); +} + +static long ubi_cdev_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + unsigned long translated_arg = (unsigned long)compat_ptr(arg); + + return ubi_cdev_ioctl(file, cmd, translated_arg); +} + +static long ctrl_cdev_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + unsigned long translated_arg = (unsigned long)compat_ptr(arg); + + return ctrl_cdev_ioctl(file, cmd, translated_arg); +} +#else +#define vol_cdev_compat_ioctl NULL +#define ubi_cdev_compat_ioctl NULL +#define ctrl_cdev_compat_ioctl NULL +#endif + +/* UBI volume character device operations */ +const struct file_operations ubi_vol_cdev_operations = { + .owner = THIS_MODULE, + .open = vol_cdev_open, + .release = vol_cdev_release, + .llseek = vol_cdev_llseek, + .read = vol_cdev_read, + .write = vol_cdev_write, + .unlocked_ioctl = vol_cdev_ioctl, + .compat_ioctl = vol_cdev_compat_ioctl, }; /* UBI character device operations */ -struct file_operations ubi_cdev_operations = { - .owner = THIS_MODULE, - .ioctl = ubi_cdev_ioctl, - .llseek = no_llseek, +const struct file_operations ubi_cdev_operations = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .unlocked_ioctl = ubi_cdev_ioctl, + .compat_ioctl = ubi_cdev_compat_ioctl, }; -/* UBI volume character device operations */ -struct file_operations ubi_vol_cdev_operations = { - .owner = THIS_MODULE, - .open = vol_cdev_open, - .release = vol_cdev_release, - .llseek = vol_cdev_llseek, - .read = vol_cdev_read, - .write = vol_cdev_write, - .ioctl = vol_cdev_ioctl, +/* UBI control character device operations */ +const struct file_operations ubi_ctrl_cdev_operations = { + .owner = THIS_MODULE, + .unlocked_ioctl = ctrl_cdev_ioctl, + .compat_ioctl = ctrl_cdev_compat_ioctl, }; diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c index 6dd4f5e77f82..49cd55ade9c8 100644 --- a/drivers/mtd/ubi/gluebi.c +++ b/drivers/mtd/ubi/gluebi.c @@ -28,7 +28,7 @@ * eraseblock size is equivalent to the logical eraseblock size of the volume. 
*/ -#include <asm/div64.h> +#include <linux/math64.h> #include "ubi.h" /** @@ -109,7 +109,6 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len, int err = 0, lnum, offs, total_read; struct ubi_volume *vol; struct ubi_device *ubi; - uint64_t tmp = from; dbg_gen("read %zd bytes from offset %lld", len, from); @@ -119,9 +118,7 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len, vol = container_of(mtd, struct ubi_volume, gluebi_mtd); ubi = vol->ubi; - offs = do_div(tmp, mtd->erasesize); - lnum = tmp; - + lnum = div_u64_rem(from, mtd->erasesize, &offs); total_read = len; while (total_read) { size_t to_read = mtd->erasesize - offs; @@ -160,7 +157,6 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len, int err = 0, lnum, offs, total_written; struct ubi_volume *vol; struct ubi_device *ubi; - uint64_t tmp = to; dbg_gen("write %zd bytes to offset %lld", len, to); @@ -173,8 +169,7 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len, if (ubi->ro_mode) return -EROFS; - offs = do_div(tmp, mtd->erasesize); - lnum = tmp; + lnum = div_u64_rem(to, mtd->erasesize, &offs); if (len % mtd->writesize || offs % mtd->writesize) return -EINVAL; diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c index ecde202a5a12..c3d653ba5ca0 100644 --- a/drivers/mtd/ubi/scan.c +++ b/drivers/mtd/ubi/scan.c @@ -42,7 +42,7 @@ #include <linux/err.h> #include <linux/crc32.h> -#include <asm/div64.h> +#include <linux/math64.h> #include "ubi.h" #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID @@ -904,10 +904,8 @@ struct ubi_scan_info *ubi_scan(struct ubi_device *ubi) dbg_msg("scanning is finished"); /* Calculate mean erase counter */ - if (si->ec_count) { - do_div(si->ec_sum, si->ec_count); - si->mean_ec = si->ec_sum; - } + if (si->ec_count) + si->mean_ec = div_u64(si->ec_sum, si->ec_count); if (si->is_empty) ubi_msg("empty MTD device detected"); diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index 4a8ec485c91d..c055511bb1b2 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h @@ -206,6 +206,7 @@ struct ubi_volume_desc; * @upd_marker: %1 if the update marker is set for this volume * @updating: %1 if the volume is being updated * @changing_leb: %1 if the atomic LEB change ioctl command is in progress + * @direct_writes: %1 if direct writes are enabled for this volume * * @gluebi_desc: gluebi UBI volume descriptor * @gluebi_refcount: reference count of the gluebi MTD device @@ -253,6 +254,7 @@ struct ubi_volume { unsigned int upd_marker:1; unsigned int updating:1; unsigned int changing_leb:1; + unsigned int direct_writes:1; #ifdef CONFIG_MTD_UBI_GLUEBI /* @@ -304,7 +306,8 @@ struct ubi_wl_entry; * @vtbl_size: size of the volume table in bytes * @vtbl: in-RAM volume table copy * @volumes_mutex: protects on-flash volume table and serializes volume - * changes, like creation, deletion, update, re-size and re-name + * changes, like creation, deletion, update, re-size, + * re-name and set property * * @max_ec: current highest erase counter value * @mean_ec: current mean erase counter value @@ -449,9 +452,9 @@ struct ubi_device { }; extern struct kmem_cache *ubi_wl_entry_slab; -extern struct file_operations ubi_ctrl_cdev_operations; -extern struct file_operations ubi_cdev_operations; -extern struct file_operations ubi_vol_cdev_operations; +extern const struct file_operations ubi_ctrl_cdev_operations; +extern const struct file_operations ubi_cdev_operations; +extern const struct file_operations ubi_vol_cdev_operations; extern struct class *ubi_class; 
extern struct mutex ubi_devices_mutex; diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c index 8b89cc18ff0b..6b4d1ae891ae 100644 --- a/drivers/mtd/ubi/upd.c +++ b/drivers/mtd/ubi/upd.c @@ -40,7 +40,7 @@ #include <linux/err.h> #include <linux/uaccess.h> -#include <asm/div64.h> +#include <linux/math64.h> #include "ubi.h" /** @@ -89,7 +89,6 @@ static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol, long long bytes) { int err; - uint64_t tmp; struct ubi_vtbl_record vtbl_rec; dbg_gen("clear update marker for volume %d", vol->vol_id); @@ -101,9 +100,9 @@ static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol, if (vol->vol_type == UBI_STATIC_VOLUME) { vol->corrupted = 0; - vol->used_bytes = tmp = bytes; - vol->last_eb_bytes = do_div(tmp, vol->usable_leb_size); - vol->used_ebs = tmp; + vol->used_bytes = bytes; + vol->used_ebs = div_u64_rem(bytes, vol->usable_leb_size, + &vol->last_eb_bytes); if (vol->last_eb_bytes) vol->used_ebs += 1; else @@ -131,7 +130,6 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol, long long bytes) { int i, err; - uint64_t tmp; dbg_gen("start update of volume %d, %llu bytes", vol->vol_id, bytes); ubi_assert(!vol->updating && !vol->changing_leb); @@ -161,9 +159,8 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol, if (!vol->upd_buf) return -ENOMEM; - tmp = bytes; - vol->upd_ebs = !!do_div(tmp, vol->usable_leb_size); - vol->upd_ebs += tmp; + vol->upd_ebs = div_u64(bytes + vol->usable_leb_size - 1, + vol->usable_leb_size); vol->upd_bytes = bytes; vol->upd_received = 0; return 0; @@ -282,7 +279,6 @@ static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol, const void __user *buf, int count) { - uint64_t tmp; int lnum, offs, err = 0, len, to_write = count; dbg_gen("write %d of %lld bytes, %lld already passed", @@ -291,10 +287,7 @@ int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol, if (ubi->ro_mode) return -EROFS; - tmp = vol->upd_received; - offs = do_div(tmp, vol->usable_leb_size); - lnum = tmp; - + lnum = div_u64_rem(vol->upd_received, vol->usable_leb_size, &offs); if (vol->upd_received + count > vol->upd_bytes) to_write = count = vol->upd_bytes - vol->upd_received; diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c index 22e1d7398fce..df5483562b7a 100644 --- a/drivers/mtd/ubi/vmt.c +++ b/drivers/mtd/ubi/vmt.c @@ -24,7 +24,7 @@ */ #include <linux/err.h> -#include <asm/div64.h> +#include <linux/math64.h> #include "ubi.h" #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID @@ -205,7 +205,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) int i, err, vol_id = req->vol_id, do_free = 1; struct ubi_volume *vol; struct ubi_vtbl_record vtbl_rec; - uint64_t bytes; dev_t dev; if (ubi->ro_mode) @@ -255,10 +254,8 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) /* Calculate how many eraseblocks are requested */ vol->usable_leb_size = ubi->leb_size - ubi->leb_size % req->alignment; - bytes = req->bytes; - if (do_div(bytes, vol->usable_leb_size)) - vol->reserved_pebs = 1; - vol->reserved_pebs += bytes; + vol->reserved_pebs += div_u64(req->bytes + vol->usable_leb_size - 1, + vol->usable_leb_size); /* Reserve physical eraseblocks */ if (vol->reserved_pebs > ubi->avail_pebs) { @@ -301,10 +298,10 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) vol->used_bytes = (long long)vol->used_ebs * vol->usable_leb_size; } 
else { - bytes = vol->used_bytes; - vol->last_eb_bytes = do_div(bytes, vol->usable_leb_size); - vol->used_ebs = bytes; - if (vol->last_eb_bytes) + vol->used_ebs = div_u64_rem(vol->used_bytes, + vol->usable_leb_size, + &vol->last_eb_bytes); + if (vol->last_eb_bytes != 0) vol->used_ebs += 1; else vol->last_eb_bytes = vol->usable_leb_size; diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 26474c92193f..c986978ce761 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -31,7 +31,7 @@ char e1000_driver_name[] = "e1000"; static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; -#define DRV_VERSION "7.3.20-k3-NAPI" +#define DRV_VERSION "7.3.21-k3-NAPI" const char e1000_driver_version[] = DRV_VERSION; static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; @@ -3712,7 +3712,7 @@ static irqreturn_t e1000_intr(int irq, void *data) struct e1000_hw *hw = &adapter->hw; u32 rctl, icr = er32(ICR); - if (unlikely(!icr)) + if (unlikely((!icr) || test_bit(__E1000_RESETTING, &adapter->flags))) return IRQ_NONE; /* Not our interrupt */ /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c index f3706e415b45..f49a426ad681 100644 --- a/drivers/net/gianfar_mii.c +++ b/drivers/net/gianfar_mii.c @@ -234,6 +234,8 @@ static int gfar_mdio_probe(struct of_device *ofdev, if (NULL == new_bus) return -ENOMEM; + device_init_wakeup(&ofdev->dev, 1); + new_bus->name = "Gianfar MII Bus", new_bus->read = &gfar_mdio_read, new_bus->write = &gfar_mdio_write, diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h index a75a31005fd3..9c78c963b721 100644 --- a/drivers/net/netxen/netxen_nic.h +++ b/drivers/net/netxen/netxen_nic.h @@ -210,7 +210,7 @@ #define MAX_CMD_DESCRIPTORS_HOST 1024 #define MAX_RCV_DESCRIPTORS_1G 2048 #define MAX_RCV_DESCRIPTORS_10G 4096 -#define MAX_JUMBO_RCV_DESCRIPTORS 512 +#define MAX_JUMBO_RCV_DESCRIPTORS 1024 #define MAX_LRO_RCV_DESCRIPTORS 8 #define MAX_RCVSTATUS_DESCRIPTORS MAX_RCV_DESCRIPTORS #define MAX_JUMBO_RCV_DESC MAX_JUMBO_RCV_DESCRIPTORS diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index ca7c8d8050c9..ffd37bea1628 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c @@ -947,8 +947,10 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose) } for (i = 0; i < n; i++) { if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 || - netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) + netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) { + kfree(buf); return -EIO; + } buf[i].addr = addr; buf[i].data = val; diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c index 72fd9e97c190..b2dcdb5ed8bd 100644 --- a/drivers/net/r6040.c +++ b/drivers/net/r6040.c @@ -438,7 +438,6 @@ static void r6040_down(struct net_device *dev) { struct r6040_private *lp = netdev_priv(dev); void __iomem *ioaddr = lp->base; - struct pci_dev *pdev = lp->pdev; int limit = 2048; u16 *adrp; u16 cmd; diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c index 607efeaf0bc5..9a00e5566af7 100644 --- a/drivers/net/skfp/skfddi.c +++ b/drivers/net/skfp/skfddi.c @@ -1003,9 +1003,9 @@ static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) break; case SKFP_CLR_STATS: /* Zero out the driver statistics */ if (!capable(CAP_NET_ADMIN)) { - memset(&lp->MacStat, 0, sizeof(lp->MacStat)); - } 
else { status = -EPERM; + } else { + memset(&lp->MacStat, 0, sizeof(lp->MacStat)); } break; default: diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index f513bdf1c887..783c1a7b869e 100644 --- a/drivers/net/smsc911x.c +++ b/drivers/net/smsc911x.c @@ -953,7 +953,7 @@ smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktbytes) do { udelay(1); val = smsc911x_reg_read(pdata, RX_DP_CTRL); - } while (timeout-- && (val & RX_DP_CTRL_RX_FFWD_)); + } while (--timeout && (val & RX_DP_CTRL_RX_FFWD_)); if (unlikely(timeout == 0)) SMSC_WARNING(HW, "Timed out waiting for " diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c index c14a4c6452c7..d801900a5036 100644 --- a/drivers/net/smsc9420.c +++ b/drivers/net/smsc9420.c @@ -1378,6 +1378,7 @@ static int smsc9420_open(struct net_device *dev) /* test the IRQ connection to the ISR */ smsc_dbg(IFUP, "Testing ISR using IRQ %d", dev->irq); + pd->software_irq_signal = false; spin_lock_irqsave(&pd->int_lock, flags); /* configure interrupt deassertion timer and enable interrupts */ @@ -1393,8 +1394,6 @@ static int smsc9420_open(struct net_device *dev) smsc9420_pci_flush_write(pd); timeout = 1000; - pd->software_irq_signal = false; - smp_wmb(); while (timeout--) { if (pd->software_irq_signal) break; diff --git a/drivers/net/tulip/21142.c b/drivers/net/tulip/21142.c index 1210fb3748a7..db7d5e11855d 100644 --- a/drivers/net/tulip/21142.c +++ b/drivers/net/tulip/21142.c @@ -9,6 +9,11 @@ Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html} for more information on this driver. + + DC21143 manual "21143 PCI/CardBus 10/100Mb/s Ethernet LAN Controller + Hardware Reference Manual" is currently available at : + http://developer.intel.com/design/network/manuals/278074.htm + Please submit bugs to http://bugzilla.kernel.org/ . */ @@ -32,7 +37,11 @@ void t21142_media_task(struct work_struct *work) int csr12 = ioread32(ioaddr + CSR12); int next_tick = 60*HZ; int new_csr6 = 0; + int csr14 = ioread32(ioaddr + CSR14); + /* CSR12[LS10,LS100] are not reliable during autonegotiation */ + if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000) + csr12 |= 6; if (tulip_debug > 2) printk(KERN_INFO"%s: 21143 negotiation status %8.8x, %s.\n", dev->name, csr12, medianame[dev->if_port]); @@ -76,7 +85,7 @@ void t21142_media_task(struct work_struct *work) new_csr6 = 0x83860000; dev->if_port = 3; iowrite32(0, ioaddr + CSR13); - iowrite32(0x0003FF7F, ioaddr + CSR14); + iowrite32(0x0003FFFF, ioaddr + CSR14); iowrite16(8, ioaddr + CSR15); iowrite32(1, ioaddr + CSR13); } @@ -132,10 +141,14 @@ void t21142_lnk_change(struct net_device *dev, int csr5) struct tulip_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->base_addr; int csr12 = ioread32(ioaddr + CSR12); + int csr14 = ioread32(ioaddr + CSR14); + /* CSR12[LS10,LS100] are not reliable during autonegotiation */ + if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000) + csr12 |= 6; if (tulip_debug > 1) printk(KERN_INFO"%s: 21143 link status interrupt %8.8x, CSR5 %x, " - "%8.8x.\n", dev->name, csr12, csr5, ioread32(ioaddr + CSR14)); + "%8.8x.\n", dev->name, csr12, csr5, csr14); /* If NWay finished and we have a negotiated partner capability. 
*/ if (tp->nway && !tp->nwayset && (csr12 & 0x7000) == 0x5000) { @@ -143,7 +156,9 @@ void t21142_lnk_change(struct net_device *dev, int csr5) int negotiated = tp->sym_advertise & (csr12 >> 16); tp->lpar = csr12 >> 16; tp->nwayset = 1; - if (negotiated & 0x0100) dev->if_port = 5; + /* If partner cannot negotiate, it is 10Mbps Half Duplex */ + if (!(csr12 & 0x8000)) dev->if_port = 0; + else if (negotiated & 0x0100) dev->if_port = 5; else if (negotiated & 0x0080) dev->if_port = 3; else if (negotiated & 0x0040) dev->if_port = 4; else if (negotiated & 0x0020) dev->if_port = 0; @@ -214,7 +229,7 @@ void t21142_lnk_change(struct net_device *dev, int csr5) tp->timer.expires = RUN_AT(3*HZ); add_timer(&tp->timer); } else if (dev->if_port == 5) - iowrite32(ioread32(ioaddr + CSR14) & ~0x080, ioaddr + CSR14); + iowrite32(csr14 & ~0x080, ioaddr + CSR14); } else if (dev->if_port == 0 || dev->if_port == 4) { if ((csr12 & 4) == 0) printk(KERN_INFO"%s: 21143 10baseT link beat good.\n", diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index 11441225bf41..e87986867ba5 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c @@ -1536,6 +1536,11 @@ static void adjust_link(struct net_device *dev) static int init_phy(struct net_device *dev) { struct ucc_geth_private *priv = netdev_priv(dev); + struct device_node *np = priv->node; + struct device_node *phy, *mdio; + const phandle *ph; + char bus_name[MII_BUS_ID_SIZE]; + const unsigned int *id; struct phy_device *phydev; char phy_id[BUS_ID_SIZE]; @@ -1543,8 +1548,18 @@ static int init_phy(struct net_device *dev) priv->oldspeed = 0; priv->oldduplex = -1; - snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, priv->ug_info->mdio_bus, - priv->ug_info->phy_address); + ph = of_get_property(np, "phy-handle", NULL); + phy = of_find_node_by_phandle(*ph); + mdio = of_get_parent(phy); + + id = of_get_property(phy, "reg", NULL); + + of_node_put(phy); + of_node_put(mdio); + + uec_mdio_bus_name(bus_name, mdio); + snprintf(phy_id, sizeof(phy_id), "%s:%02x", + bus_name, *id); phydev = phy_connect(dev, phy_id, &adjust_link, 0, priv->phy_interface); @@ -3748,6 +3763,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma ugeth->ug_info = ug_info; ugeth->dev = dev; + ugeth->node = np; return 0; } diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h index 8f699cb773ee..16cbe42ba43c 100644 --- a/drivers/net/ucc_geth.h +++ b/drivers/net/ucc_geth.h @@ -1186,6 +1186,8 @@ struct ucc_geth_private { int oldspeed; int oldduplex; int oldlink; + + struct device_node *node; }; void uec_set_ethtool_ops(struct net_device *netdev); diff --git a/drivers/net/ucc_geth_mii.c b/drivers/net/ucc_geth_mii.c index c001d261366b..54635911305c 100644 --- a/drivers/net/ucc_geth_mii.c +++ b/drivers/net/ucc_geth_mii.c @@ -156,7 +156,7 @@ static int uec_mdio_probe(struct of_device *ofdev, const struct of_device_id *ma if (err) goto reg_map_fail; - snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start); + uec_mdio_bus_name(new_bus->id, np); new_bus->irq = kmalloc(32 * sizeof(int), GFP_KERNEL); @@ -283,3 +283,13 @@ void uec_mdio_exit(void) { of_unregister_platform_driver(&uec_mdio_driver); } + +void uec_mdio_bus_name(char *name, struct device_node *np) +{ + const u32 *reg; + + reg = of_get_property(np, "reg", NULL); + + snprintf(name, MII_BUS_ID_SIZE, "%s@%x", np->name, reg ? 
*reg : 0); +} + diff --git a/drivers/net/ucc_geth_mii.h b/drivers/net/ucc_geth_mii.h index 1e45b2028a50..840cf80235b7 100644 --- a/drivers/net/ucc_geth_mii.h +++ b/drivers/net/ucc_geth_mii.h @@ -97,4 +97,5 @@ int uec_mdio_read(struct mii_bus *bus, int mii_id, int regnum); int uec_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value); int __init uec_mdio_init(void); void uec_mdio_exit(void); +void uec_mdio_bus_name(char *name, struct device_node *np); #endif /* __UEC_MII_H */ diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 63ef2a8905fb..c68808336c8c 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -287,7 +287,7 @@ static void try_fill_recv_maxbufs(struct virtnet_info *vi) skb_put(skb, MAX_PACKET_LEN); hdr = skb_vnet_hdr(skb); - sg_init_one(sg, hdr, sizeof(*hdr)); + sg_set_buf(sg, hdr, sizeof(*hdr)); if (vi->big_packets) { for (i = 0; i < MAX_SKB_FRAGS; i++) { @@ -488,9 +488,9 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb) /* Encode metadata header at front. */ if (vi->mergeable_rx_bufs) - sg_init_one(sg, mhdr, sizeof(*mhdr)); + sg_set_buf(sg, mhdr, sizeof(*mhdr)); else - sg_init_one(sg, hdr, sizeof(*hdr)); + sg_set_buf(sg, hdr, sizeof(*hdr)); num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1; diff --git a/drivers/net/wimax/i2400m/debugfs.c b/drivers/net/wimax/i2400m/debugfs.c index 626632985977..9b81af3f80a9 100644 --- a/drivers/net/wimax/i2400m/debugfs.c +++ b/drivers/net/wimax/i2400m/debugfs.c @@ -234,20 +234,6 @@ struct dentry *debugfs_create_i2400m_reset( &fops_i2400m_reset); } -/* - * Debug levels control; see debug.h - */ -struct d_level D_LEVEL[] = { - D_SUBMODULE_DEFINE(control), - D_SUBMODULE_DEFINE(driver), - D_SUBMODULE_DEFINE(debugfs), - D_SUBMODULE_DEFINE(fw), - D_SUBMODULE_DEFINE(netdev), - D_SUBMODULE_DEFINE(rfkill), - D_SUBMODULE_DEFINE(rx), - D_SUBMODULE_DEFINE(tx), -}; -size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL); #define __debugfs_register(prefix, name, parent) \ do { \ diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c index 5f98047e18cf..e80a0b65a754 100644 --- a/drivers/net/wimax/i2400m/driver.c +++ b/drivers/net/wimax/i2400m/driver.c @@ -707,6 +707,22 @@ void i2400m_release(struct i2400m *i2400m) EXPORT_SYMBOL_GPL(i2400m_release); +/* + * Debug levels control; see debug.h + */ +struct d_level D_LEVEL[] = { + D_SUBMODULE_DEFINE(control), + D_SUBMODULE_DEFINE(driver), + D_SUBMODULE_DEFINE(debugfs), + D_SUBMODULE_DEFINE(fw), + D_SUBMODULE_DEFINE(netdev), + D_SUBMODULE_DEFINE(rfkill), + D_SUBMODULE_DEFINE(rx), + D_SUBMODULE_DEFINE(tx), +}; +size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL); + + static int __init i2400m_driver_init(void) { diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c index 8ef87356e083..a533ed60bb4d 100644 --- a/drivers/net/wireless/ath5k/base.c +++ b/drivers/net/wireless/ath5k/base.c @@ -1028,6 +1028,8 @@ ath5k_setup_bands(struct ieee80211_hw *hw) * it's done by reseting the chip. To accomplish this we must * first cleanup any pending DMA, then restart stuff after a la * ath5k_init. + * + * Called with sc->lock. 
*/ static int ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan) @@ -2814,11 +2816,17 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed) { struct ath5k_softc *sc = hw->priv; struct ieee80211_conf *conf = &hw->conf; + int ret; + + mutex_lock(&sc->lock); sc->bintval = conf->beacon_int; sc->power_level = conf->power_level; - return ath5k_chan_set(sc, conf->channel); + ret = ath5k_chan_set(sc, conf->channel); + + mutex_unlock(&sc->lock); + return ret; } static int diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index 0dc8eed16404..b35c8813bef4 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -1719,6 +1719,10 @@ static int iwl_read_ucode(struct iwl_priv *priv) priv->ucode_data_backup.len = data_size; iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup); + if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr || + !priv->ucode_data_backup.v_addr) + goto err_pci_alloc; + /* Initialization instructions and data */ if (init_size && init_data_size) { priv->ucode_init.len = init_size; diff --git a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c b/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c index 4e75e8e7fa90..78df281b297a 100644 --- a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c +++ b/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c @@ -285,7 +285,10 @@ static void rtl8225_rf_set_tx_power(struct ieee80211_hw *dev, int channel) ofdm_power = priv->channels[channel - 1].hw_value >> 4; cck_power = min(cck_power, (u8)11); - ofdm_power = min(ofdm_power, (u8)35); + if (ofdm_power > (u8)15) + ofdm_power = 25; + else + ofdm_power += 10; rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK, rtl8225_tx_gain_cck_ofdm[cck_power / 6] >> 1); @@ -536,7 +539,10 @@ static void rtl8225z2_rf_set_tx_power(struct ieee80211_hw *dev, int channel) cck_power += priv->txpwr_base & 0xF; cck_power = min(cck_power, (u8)35); - ofdm_power = min(ofdm_power, (u8)15); + if (ofdm_power > (u8)15) + ofdm_power = 25; + else + ofdm_power += 10; ofdm_power += priv->txpwr_base >> 4; ofdm_power = min(ofdm_power, (u8)35); diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index 3fac8f81d59d..a70cf16ee1ad 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -668,7 +668,7 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) * @dev: instance of PCI owned by the driver that's asking * @mask: number of address bits this PCI device can handle * - * See Documentation/DMA-mapping.txt + * See Documentation/PCI/PCI-DMA-mapping.txt */ static int sba_dma_supported( struct device *dev, u64 mask) { @@ -680,8 +680,8 @@ static int sba_dma_supported( struct device *dev, u64 mask) return(0); } - /* Documentation/DMA-mapping.txt tells drivers to try 64-bit first, - * then fall back to 32-bit if that fails. + /* Documentation/PCI/PCI-DMA-mapping.txt tells drivers to try 64-bit + * first, then fall back to 32-bit if that fails. * We are just "encouraging" 32-bit DMA masks here since we can * never allow IOMMU bypass unless we add special support for ZX1. */ @@ -706,7 +706,7 @@ static int sba_dma_supported( struct device *dev, u64 mask) * @size: number of bytes to map in driver buffer. * @direction: R/W or both. 
* - * See Documentation/DMA-mapping.txt + * See Documentation/PCI/PCI-DMA-mapping.txt */ static dma_addr_t sba_map_single(struct device *dev, void *addr, size_t size, @@ -785,7 +785,7 @@ sba_map_single(struct device *dev, void *addr, size_t size, * @size: number of bytes mapped in driver buffer. * @direction: R/W or both. * - * See Documentation/DMA-mapping.txt + * See Documentation/PCI/PCI-DMA-mapping.txt */ static void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, @@ -861,7 +861,7 @@ sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, * @size: number of bytes mapped in driver buffer. * @dma_handle: IOVA of new buffer. * - * See Documentation/DMA-mapping.txt + * See Documentation/PCI/PCI-DMA-mapping.txt */ static void *sba_alloc_consistent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t gfp) @@ -892,7 +892,7 @@ static void *sba_alloc_consistent(struct device *hwdev, size_t size, * @vaddr: virtual address IOVA of "consistent" buffer. * @dma_handler: IO virtual address of "consistent" buffer. * - * See Documentation/DMA-mapping.txt + * See Documentation/PCI/PCI-DMA-mapping.txt */ static void sba_free_consistent(struct device *hwdev, size_t size, void *vaddr, @@ -927,7 +927,7 @@ int dump_run_sg = 0; * @nents: number of entries in list * @direction: R/W or both. * - * See Documentation/DMA-mapping.txt + * See Documentation/PCI/PCI-DMA-mapping.txt */ static int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, @@ -1011,7 +1011,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, * @nents: number of entries in list * @direction: R/W or both. * - * See Documentation/DMA-mapping.txt + * See Documentation/PCI/PCI-DMA-mapping.txt */ static void sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 7c789f0a94d7..de91ddab0a86 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -382,6 +382,11 @@ static int __init hp_wmi_input_setup(void) case KE_SW: set_bit(EV_SW, hp_wmi_input_dev->evbit); set_bit(key->keycode, hp_wmi_input_dev->swbit); + + /* Set initial dock state */ + input_report_switch(hp_wmi_input_dev, key->keycode, + hp_wmi_dock_state()); + input_sync(hp_wmi_input_dev); break; } } @@ -441,6 +446,7 @@ static int __init hp_wmi_bios_setup(struct platform_device *device) bluetooth_rfkill->toggle_radio = hp_wmi_bluetooth_set; bluetooth_rfkill->user_claim_unsupported = 1; err = rfkill_register(bluetooth_rfkill); + if (err) goto register_bluetooth_error; } diff --git a/drivers/serial/jsm/jsm_tty.c b/drivers/serial/jsm/jsm_tty.c index 3547558d2caf..324c74d2f666 100644 --- a/drivers/serial/jsm/jsm_tty.c +++ b/drivers/serial/jsm/jsm_tty.c @@ -161,6 +161,11 @@ static void jsm_tty_stop_rx(struct uart_port *port) channel->ch_bd->bd_ops->disable_receiver(channel); } +static void jsm_tty_enable_ms(struct uart_port *port) +{ + /* Nothing needed */ +} + static void jsm_tty_break(struct uart_port *port, int break_state) { unsigned long lock_flags; @@ -345,6 +350,7 @@ static struct uart_ops jsm_ops = { .start_tx = jsm_tty_start_tx, .send_xchar = jsm_tty_send_xchar, .stop_rx = jsm_tty_stop_rx, + .enable_ms = jsm_tty_enable_ms, .break_ctl = jsm_tty_break, .startup = jsm_tty_open, .shutdown = jsm_tty_close, diff --git a/drivers/staging/agnx/agnx.h b/drivers/staging/agnx/agnx.h index a75b0db3726c..20f36da62475 100644 --- a/drivers/staging/agnx/agnx.h +++ b/drivers/staging/agnx/agnx.h @@ -1,6 +1,8 
@@ #ifndef AGNX_H_ #define AGNX_H_ +#include <linux/io.h> + #include "xmit.h" #define PFX KBUILD_MODNAME ": " diff --git a/drivers/staging/altpciechdma/altpciechdma.c b/drivers/staging/altpciechdma/altpciechdma.c index 8e2b4ca0651d..f516140ca976 100644 --- a/drivers/staging/altpciechdma/altpciechdma.c +++ b/drivers/staging/altpciechdma/altpciechdma.c @@ -531,7 +531,7 @@ static int __devinit dma_test(struct ape_dev *ape, struct pci_dev *dev) goto fail; /* allocate and map coherently-cached memory for a DMA-able buffer */ - /* @see 2.6.26.2/Documentation/DMA-mapping.txt line 318 */ + /* @see Documentation/PCI/PCI-DMA-mapping.txt, near line 318 */ buffer_virt = (u8 *)pci_alloc_consistent(dev, PAGE_SIZE * 4, &buffer_bus); if (!buffer_virt) { printk(KERN_DEBUG "Could not allocate coherent DMA buffer.\n"); @@ -846,7 +846,7 @@ static int __devinit probe(struct pci_dev *dev, const struct pci_device_id *id) #if 1 // @todo For now, disable 64-bit, because I do not understand the implications (DAC!) /* query for DMA transfer */ - /* @see Documentation/DMA-mapping.txt */ + /* @see Documentation/PCI/PCI-DMA-mapping.txt */ if (!pci_set_dma_mask(dev, DMA_64BIT_MASK)) { pci_set_consistent_dma_mask(dev, DMA_64BIT_MASK); /* use 64-bit DMA */ diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c index 6a4ceacb33f5..758131cad08a 100644 --- a/drivers/staging/android/binder.c +++ b/drivers/staging/android/binder.c @@ -319,6 +319,7 @@ int task_get_unused_fd_flags(struct task_struct *tsk, int flags) int fd, error; struct fdtable *fdt; unsigned long rlim_cur; + unsigned long irqs; if (files == NULL) return -ESRCH; @@ -335,12 +336,11 @@ repeat: * N.B. For clone tasks sharing a files structure, this test * will limit the total number of files that can be opened. 
*/ - rcu_read_lock(); - if (tsk->signal) + rlim_cur = 0; + if (lock_task_sighand(tsk, &irqs)) { rlim_cur = tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur; - else - rlim_cur = 0; - rcu_read_unlock(); + unlock_task_sighand(tsk, &irqs); + } if (fd >= rlim_cur) goto out; @@ -2649,14 +2649,14 @@ static void binder_vma_open(struct vm_area_struct *vma) { struct binder_proc *proc = vma->vm_private_data; if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE) - printk(KERN_INFO "binder: %d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, vma->vm_page_prot.pgprot); + printk(KERN_INFO "binder: %d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, pgprot_val(vma->vm_page_prot)); dump_stack(); } static void binder_vma_close(struct vm_area_struct *vma) { struct binder_proc *proc = vma->vm_private_data; if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE) - printk(KERN_INFO "binder: %d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, vma->vm_page_prot.pgprot); + printk(KERN_INFO "binder: %d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, pgprot_val(vma->vm_page_prot)); proc->vma = NULL; } @@ -2677,7 +2677,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) vma->vm_end = vma->vm_start + SZ_4M; if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE) - printk(KERN_INFO "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, vma->vm_page_prot.pgprot); + printk(KERN_INFO "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, pgprot_val(vma->vm_page_prot)); if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { ret = -EPERM; diff --git a/drivers/staging/android/lowmemorykiller.txt b/drivers/staging/android/lowmemorykiller.txt new file mode 100644 index 000000000000..bd5c0c028968 --- /dev/null +++ b/drivers/staging/android/lowmemorykiller.txt @@ -0,0 +1,16 @@ +The lowmemorykiller driver lets user-space specify a set of memory thresholds +where processes with a range of oom_adj values will get killed. Specify the +minimum oom_adj values in /sys/module/lowmemorykiller/parameters/adj and the +number of free pages in /sys/module/lowmemorykiller/parameters/minfree. Both +files take a comma separated list of numbers in ascending order. + +For example, write "0,8" to /sys/module/lowmemorykiller/parameters/adj and +"1024,4096" to /sys/module/lowmemorykiller/parameters/minfree to kill processes +with a oom_adj value of 8 or higher when the free memory drops below 4096 pages +and kill processes with a oom_adj value of 0 or higher when the free memory +drops below 1024 pages. + +The driver considers memory used for caches to be free, but if a large +percentage of the cached memory is locked this can be very inaccurate +and processes may not get killed until the normal oom killer is triggered. 
+ diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c index bea68c9fc942..903270cbbe02 100644 --- a/drivers/staging/android/timed_gpio.c +++ b/drivers/staging/android/timed_gpio.c @@ -18,7 +18,7 @@ #include <linux/platform_device.h> #include <linux/hrtimer.h> #include <linux/err.h> -#include <asm/arch/gpio.h> +#include <linux/gpio.h> #include "timed_gpio.h" @@ -49,7 +49,8 @@ static ssize_t gpio_enable_show(struct device *dev, struct device_attribute *att if (hrtimer_active(&gpio_data->timer)) { ktime_t r = hrtimer_get_remaining(&gpio_data->timer); - remaining = r.tv.sec * 1000 + r.tv.nsec / 1000000; + struct timeval t = ktime_to_timeval(r); + remaining = t.tv_sec * 1000 + t.tv_usec; } else remaining = 0; diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig index b501bfb9c754..b47ca1e7e383 100644 --- a/drivers/staging/comedi/Kconfig +++ b/drivers/staging/comedi/Kconfig @@ -1,6 +1,7 @@ config COMEDI tristate "Data Acquision support (comedi)" default N + depends on m ---help--- Enable support a wide range of data acquision devices for Linux. diff --git a/drivers/staging/meilhaus/Kconfig b/drivers/staging/meilhaus/Kconfig index 6def83fa2c96..923af22a4686 100644 --- a/drivers/staging/meilhaus/Kconfig +++ b/drivers/staging/meilhaus/Kconfig @@ -4,6 +4,7 @@ menuconfig MEILHAUS tristate "Meilhaus support" + depends on m ---help--- If you have a Meilhaus card, say Y (or M) here. @@ -18,7 +19,7 @@ if MEILHAUS config ME0600 tristate "Meilhaus ME-600 support" default n - depends on PCI + depends on PCI && m help This driver supports the Meilhaus ME-600 family of boards that do data collection and multipurpose I/O. @@ -29,7 +30,7 @@ config ME0600 config ME0900 tristate "Meilhaus ME-900 support" default n - depends on PCI + depends on PCI && m help This driver supports the Meilhaus ME-900 family of boards that do data collection and multipurpose I/O. @@ -40,7 +41,7 @@ config ME0900 config ME1000 tristate "Meilhaus ME-1000 support" default n - depends on PCI + depends on PCI && m help This driver supports the Meilhaus ME-1000 family of boards that do data collection and multipurpose I/O. @@ -51,7 +52,7 @@ config ME1000 config ME1400 tristate "Meilhaus ME-1400 support" default n - depends on PCI + depends on PCI && m help This driver supports the Meilhaus ME-1400 family of boards that do data collection and multipurpose I/O. @@ -62,7 +63,7 @@ config ME1400 config ME1600 tristate "Meilhaus ME-1600 support" default n - depends on PCI + depends on PCI && m help This driver supports the Meilhaus ME-1600 family of boards that do data collection and multipurpose I/O. @@ -73,7 +74,7 @@ config ME1600 config ME4600 tristate "Meilhaus ME-4600 support" default n - depends on PCI + depends on PCI && m help This driver supports the Meilhaus ME-4600 family of boards that do data collection and multipurpose I/O. @@ -84,7 +85,7 @@ config ME4600 config ME6000 tristate "Meilhaus ME-6000 support" default n - depends on PCI + depends on PCI && m help This driver supports the Meilhaus ME-6000 family of boards that do data collection and multipurpose I/O. @@ -95,7 +96,7 @@ config ME6000 config ME8100 tristate "Meilhaus ME-8100 support" default n - depends on PCI + depends on PCI && m help This driver supports the Meilhaus ME-8100 family of boards that do data collection and multipurpose I/O. 
@@ -106,7 +107,7 @@ config ME8100 config ME8200 tristate "Meilhaus ME-8200 support" default n - depends on PCI + depends on PCI && m help This driver supports the Meilhaus ME-8200 family of boards that do data collection and multipurpose I/O. @@ -117,7 +118,7 @@ config ME8200 config MEDUMMY tristate "Meilhaus dummy driver" default n - depends on PCI + depends on PCI && m help This provides a dummy driver for the Meilhaus driver package diff --git a/drivers/staging/poch/poch.c b/drivers/staging/poch/poch.c index ec343ef53a85..0d111ddfabb2 100644 --- a/drivers/staging/poch/poch.c +++ b/drivers/staging/poch/poch.c @@ -1026,7 +1026,7 @@ static int poch_ioctl(struct inode *inode, struct file *filp, } break; case POCH_IOC_GET_COUNTERS: - if (access_ok(VERIFY_WRITE, argp, sizeof(struct poch_counters))) + if (!access_ok(VERIFY_WRITE, argp, sizeof(struct poch_counters))) return -EFAULT; spin_lock_irq(&channel->counters_lock); diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c index 72e209276ea7..22f93dd0ba03 100644 --- a/drivers/staging/usbip/usbip_common.c +++ b/drivers/staging/usbip/usbip_common.c @@ -406,8 +406,20 @@ void usbip_start_threads(struct usbip_device *ud) /* * threads are invoked per one device (per one connection). */ - kernel_thread(usbip_thread, (void *)&ud->tcp_rx, 0); - kernel_thread(usbip_thread, (void *)&ud->tcp_tx, 0); + int retval; + + retval = kernel_thread(usbip_thread, (void *)&ud->tcp_rx, 0); + if (retval < 0) { + printk(KERN_ERR "Creating tcp_rx thread for ud %p failed.\n", + ud); + return; + } + retval = kernel_thread(usbip_thread, (void *)&ud->tcp_tx, 0); + if (retval < 0) { + printk(KERN_ERR "Creating tcp_tx thread for ud %p failed.\n", + ud); + return; + } /* confirm threads are starting */ wait_for_completion(&ud->tcp_rx.thread_done); diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index c94f71980c1b..f0267706cb45 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -41,7 +41,7 @@ menuconfig FB You need an utility program called fbset to make full use of frame buffer devices. Please read <file:Documentation/fb/framebuffer.txt> and the Framebuffer-HOWTO at - <http://www.tahallah.demon.co.uk/programming/prog.html> for more + <http://www.munted.org.uk/programming/Framebuffer-HOWTO-1.2.html> for more information. 
Say Y here and to the driver for your graphics board below if you diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c index 77ebc3c263d6..549b0144da11 100644 --- a/fs/bio-integrity.c +++ b/fs/bio-integrity.c @@ -140,7 +140,6 @@ int bio_integrity_add_page(struct bio *bio, struct page *page, iv = bip_vec_idx(bip, bip->bip_vcnt); BUG_ON(iv == NULL); - BUG_ON(iv->bv_page != NULL); iv->bv_page = page; iv->bv_len = len; @@ -465,7 +464,7 @@ static int bio_integrity_verify(struct bio *bio) if (ret) { kunmap_atomic(kaddr, KM_USER0); - break; + return ret; } sectors = bv->bv_len / bi->sector_size; @@ -493,18 +492,13 @@ static void bio_integrity_verify_fn(struct work_struct *work) struct bio_integrity_payload *bip = container_of(work, struct bio_integrity_payload, bip_work); struct bio *bio = bip->bip_bio; - int error = bip->bip_error; + int error; - if (bio_integrity_verify(bio)) { - clear_bit(BIO_UPTODATE, &bio->bi_flags); - error = -EIO; - } + error = bio_integrity_verify(bio); /* Restore original bio completion handler */ bio->bi_end_io = bip->bip_end_io; - - if (bio->bi_end_io) - bio->bi_end_io(bio, error); + bio_endio(bio, error); } /** @@ -525,7 +519,17 @@ void bio_integrity_endio(struct bio *bio, int error) BUG_ON(bip->bip_bio != bio); - bip->bip_error = error; + /* In case of an I/O error there is no point in verifying the + * integrity metadata. Restore original bio end_io handler + * and run it. + */ + if (error) { + bio->bi_end_io = bip->bip_end_io; + bio_endio(bio, error); + + return; + } + INIT_WORK(&bip->bip_work, bio_integrity_verify_fn); queue_work(kintegrityd_wq, &bip->bip_work); } diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES index 080703a15f44..73ac7ebd1dfc 100644 --- a/fs/cifs/CHANGES +++ b/fs/cifs/CHANGES @@ -5,7 +5,9 @@ rather than posix (advisory) byte range locks, even though server would support posix byte range locks. Fix query of root inode when prefixpath specified and user does not have access to query information about the top of the share. Fix problem in 2.6.28 resolving DFS paths to -Samba servers (worked to Windows). +Samba servers (worked to Windows). Fix rmdir so that pending search +(readdir) requests do not get invalid results which include the now +removed directory. 
Version 1.55 ------------ diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index d4839cf0cb2c..7c9809523f42 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c @@ -48,11 +48,11 @@ static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu, if ((cifs_pdu == NULL) || (signature == NULL) || (key == NULL)) return -EINVAL; - MD5Init(&context); - MD5Update(&context, (char *)&key->data, key->len); - MD5Update(&context, cifs_pdu->Protocol, cifs_pdu->smb_buf_length); + cifs_MD5_init(&context); + cifs_MD5_update(&context, (char *)&key->data, key->len); + cifs_MD5_update(&context, cifs_pdu->Protocol, cifs_pdu->smb_buf_length); - MD5Final(signature, &context); + cifs_MD5_final(signature, &context); return 0; } @@ -96,8 +96,8 @@ static int cifs_calc_signature2(const struct kvec *iov, int n_vec, if ((iov == NULL) || (signature == NULL) || (key == NULL)) return -EINVAL; - MD5Init(&context); - MD5Update(&context, (char *)&key->data, key->len); + cifs_MD5_init(&context); + cifs_MD5_update(&context, (char *)&key->data, key->len); for (i = 0; i < n_vec; i++) { if (iov[i].iov_len == 0) continue; @@ -110,13 +110,13 @@ static int cifs_calc_signature2(const struct kvec *iov, int n_vec, if (i == 0) { if (iov[0].iov_len <= 8) /* cmd field at offset 9 */ break; /* nothing to sign or corrupt header */ - MD5Update(&context, iov[0].iov_base+4, + cifs_MD5_update(&context, iov[0].iov_base+4, iov[0].iov_len-4); } else - MD5Update(&context, iov[i].iov_base, iov[i].iov_len); + cifs_MD5_update(&context, iov[i].iov_base, iov[i].iov_len); } - MD5Final(signature, &context); + cifs_MD5_final(signature, &context); return 0; } diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 06f6779988bf..382ba6298809 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -35,8 +35,8 @@ extern struct smb_hdr *cifs_buf_get(void); extern void cifs_buf_release(void *); extern struct smb_hdr *cifs_small_buf_get(void); extern void cifs_small_buf_release(void *); -extern int smb_send(struct socket *, struct smb_hdr *, - unsigned int /* length */ , struct sockaddr *, bool); +extern int smb_send(struct TCP_Server_Info *, struct smb_hdr *, + unsigned int /* length */); extern unsigned int _GetXid(void); extern void _FreeXid(unsigned int); #define GetXid() (int)_GetXid(); cFYI(1,("CIFS VFS: in %s as Xid: %d with uid: %d",__func__, xid,current_fsuid())); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index e9ea394ee075..2209be943051 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1354,7 +1354,7 @@ cifs_parse_mount_options(char *options, const char *devname, } static struct TCP_Server_Info * -cifs_find_tcp_session(struct sockaddr *addr) +cifs_find_tcp_session(struct sockaddr_storage *addr) { struct list_head *tmp; struct TCP_Server_Info *server; @@ -1374,11 +1374,11 @@ cifs_find_tcp_session(struct sockaddr *addr) if (server->tcpStatus == CifsNew) continue; - if (addr->sa_family == AF_INET && + if (addr->ss_family == AF_INET && (addr4->sin_addr.s_addr != server->addr.sockAddr.sin_addr.s_addr)) continue; - else if (addr->sa_family == AF_INET6 && + else if (addr->ss_family == AF_INET6 && memcmp(&server->addr.sockAddr6.sin6_addr, &addr6->sin6_addr, sizeof(addr6->sin6_addr))) continue; @@ -1419,12 +1419,12 @@ static struct TCP_Server_Info * cifs_get_tcp_session(struct smb_vol *volume_info) { struct TCP_Server_Info *tcp_ses = NULL; - struct sockaddr addr; + struct sockaddr_storage addr; struct sockaddr_in *sin_server = (struct sockaddr_in *) &addr; struct sockaddr_in6 *sin_server6 = (struct 
sockaddr_in6 *) &addr; int rc; - memset(&addr, 0, sizeof(struct sockaddr)); + memset(&addr, 0, sizeof(struct sockaddr_storage)); if (volume_info->UNCip && volume_info->UNC) { rc = cifs_inet_pton(AF_INET, volume_info->UNCip, @@ -1435,9 +1435,9 @@ cifs_get_tcp_session(struct smb_vol *volume_info) rc = cifs_inet_pton(AF_INET6, volume_info->UNCip, &sin_server6->sin6_addr.in6_u); if (rc > 0) - addr.sa_family = AF_INET6; + addr.ss_family = AF_INET6; } else { - addr.sa_family = AF_INET; + addr.ss_family = AF_INET; } if (rc <= 0) { @@ -1502,7 +1502,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info) tcp_ses->tcpStatus = CifsNew; ++tcp_ses->srv_count; - if (addr.sa_family == AF_INET6) { + if (addr.ss_family == AF_INET6) { cFYI(1, ("attempting ipv6 connect")); /* BB should we allow ipv6 on port 139? */ /* other OS never observed in Wild doing 139 with v6 */ @@ -1802,7 +1802,7 @@ ipv4_connect(struct TCP_Server_Info *server) * user space buffer */ socket->sk->sk_rcvtimeo = 7 * HZ; - socket->sk->sk_sndtimeo = 3 * HZ; + socket->sk->sk_sndtimeo = 5 * HZ; /* make the bufsizes depend on wsize/rsize and max requests */ if (server->noautotune) { @@ -1860,9 +1860,7 @@ ipv4_connect(struct TCP_Server_Info *server) smb_buf = (struct smb_hdr *)ses_init_buf; /* sizeof RFC1002_SESSION_REQUEST with no scope */ smb_buf->smb_buf_length = 0x81000044; - rc = smb_send(socket, smb_buf, 0x44, - (struct sockaddr *) &server->addr.sockAddr, - server->noblocksnd); + rc = smb_send(server, smb_buf, 0x44); kfree(ses_init_buf); msleep(1); /* RFC1001 layer in at least one server requires very short break before negprot @@ -1955,7 +1953,7 @@ ipv6_connect(struct TCP_Server_Info *server) * user space buffer */ socket->sk->sk_rcvtimeo = 7 * HZ; - socket->sk->sk_sndtimeo = 3 * HZ; + socket->sk->sk_sndtimeo = 5 * HZ; server->ssocket = socket; return rc; diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 838d9c720a5c..964aad03c5ad 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -129,6 +129,17 @@ cifs_bp_rename_retry: return full_path; } +static void setup_cifs_dentry(struct cifsTconInfo *tcon, + struct dentry *direntry, + struct inode *newinode) +{ + if (tcon->nocase) + direntry->d_op = &cifs_ci_dentry_ops; + else + direntry->d_op = &cifs_dentry_ops; + d_instantiate(direntry, newinode); +} + /* Inode operations in similar order to how they appear in Linux file fs.h */ int @@ -139,14 +150,14 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, int xid; int create_options = CREATE_NOT_DIR; int oplock = 0; + /* BB below access is too much for the mknod to request */ int desiredAccess = GENERIC_READ | GENERIC_WRITE; __u16 fileHandle; struct cifs_sb_info *cifs_sb; - struct cifsTconInfo *pTcon; + struct cifsTconInfo *tcon; char *full_path = NULL; FILE_ALL_INFO *buf = NULL; struct inode *newinode = NULL; - struct cifsFileInfo *pCifsFile = NULL; struct cifsInodeInfo *pCifsInode; int disposition = FILE_OVERWRITE_IF; bool write_only = false; @@ -154,7 +165,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, xid = GetXid(); cifs_sb = CIFS_SB(inode->i_sb); - pTcon = cifs_sb->tcon; + tcon = cifs_sb->tcon; full_path = build_path_from_dentry(direntry); if (full_path == NULL) { @@ -162,6 +173,8 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, return -ENOMEM; } + mode &= ~current->fs->umask; + if (nd && (nd->flags & LOOKUP_OPEN)) { int oflags = nd->intent.open.flags; @@ -196,17 +209,15 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, return -ENOMEM; } - mode &= 
~current->fs->umask; - /* * if we're not using unix extensions, see if we need to set * ATTR_READONLY on the create call */ - if (!pTcon->unix_ext && (mode & S_IWUGO) == 0) + if (!tcon->unix_ext && (mode & S_IWUGO) == 0) create_options |= CREATE_OPTION_READONLY; if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS) - rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, + rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess, create_options, &fileHandle, &oplock, buf, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); @@ -215,7 +226,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, if (rc == -EIO) { /* old server, retry the open legacy style */ - rc = SMBLegacyOpen(xid, pTcon, full_path, disposition, + rc = SMBLegacyOpen(xid, tcon, full_path, disposition, desiredAccess, create_options, &fileHandle, &oplock, buf, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); @@ -225,7 +236,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, } else { /* If Open reported that we actually created a file then we now have to set the mode if possible */ - if ((pTcon->unix_ext) && (oplock & CIFS_CREATE_ACTION)) { + if ((tcon->unix_ext) && (oplock & CIFS_CREATE_ACTION)) { struct cifs_unix_set_info_args args = { .mode = mode, .ctime = NO_CHANGE_64, @@ -244,20 +255,20 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, args.uid = NO_CHANGE_64; args.gid = NO_CHANGE_64; } - CIFSSMBUnixSetInfo(xid, pTcon, full_path, &args, + CIFSSMBUnixSetInfo(xid, tcon, full_path, &args, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); } else { /* BB implement mode setting via Windows security descriptors e.g. */ - /* CIFSSMBWinSetPerms(xid,pTcon,path,mode,-1,-1,nls);*/ + /* CIFSSMBWinSetPerms(xid,tcon,path,mode,-1,-1,nls);*/ /* Could set r/o dos attribute if mode & 0222 == 0 */ } /* server might mask mode so we have to query for it */ - if (pTcon->unix_ext) + if (tcon->unix_ext) rc = cifs_get_inode_info_unix(&newinode, full_path, inode->i_sb, xid); else { @@ -283,22 +294,17 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, } if (rc != 0) { - cFYI(1, - ("Create worked but get_inode_info failed rc = %d", - rc)); - } else { - if (pTcon->nocase) - direntry->d_op = &cifs_ci_dentry_ops; - else - direntry->d_op = &cifs_dentry_ops; - d_instantiate(direntry, newinode); - } + cFYI(1, ("Create worked, get_inode_info failed rc = %d", + rc)); + } else + setup_cifs_dentry(tcon, direntry, newinode); + if ((nd == NULL /* nfsd case - nfs srv does not set nd */) || (!(nd->flags & LOOKUP_OPEN))) { /* mknod case - do not leave file open */ - CIFSSMBClose(xid, pTcon, fileHandle); + CIFSSMBClose(xid, tcon, fileHandle); } else if (newinode) { - pCifsFile = + struct cifsFileInfo *pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL); if (pCifsFile == NULL) @@ -316,7 +322,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, /* set the following in open now pCifsFile->pfile = file; */ write_lock(&GlobalSMBSeslock); - list_add(&pCifsFile->tlist, &pTcon->openFileList); + list_add(&pCifsFile->tlist, &tcon->openFileList); pCifsInode = CIFS_I(newinode); if (pCifsInode) { /* if readable file instance put first in list*/ diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 5ab9896fdcb2..bcf7b5184664 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -1285,6 +1285,11 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry) cifsInode = CIFS_I(direntry->d_inode); 
cifsInode->time = 0; /* force revalidate to go get info when needed */ + + cifsInode = CIFS_I(inode); + cifsInode->time = 0; /* force revalidate to get parent dir info + since cached search results now invalid */ + direntry->d_inode->i_ctime = inode->i_ctime = inode->i_mtime = current_fs_time(inode->i_sb); diff --git a/fs/cifs/md5.c b/fs/cifs/md5.c index 462bbfefd4b6..98b66a54c319 100644 --- a/fs/cifs/md5.c +++ b/fs/cifs/md5.c @@ -10,8 +10,8 @@ * with every copy. * * To compute the message digest of a chunk of bytes, declare an - * MD5Context structure, pass it to MD5Init, call MD5Update as - * needed on buffers full of bytes, and then call MD5Final, which + * MD5Context structure, pass it to cifs_MD5_init, call cifs_MD5_update as + * needed on buffers full of bytes, and then call cifs_MD5_final, which * will fill a supplied 16-byte array with the digest. */ @@ -45,7 +45,7 @@ byteReverse(unsigned char *buf, unsigned longs) * initialization constants. */ void -MD5Init(struct MD5Context *ctx) +cifs_MD5_init(struct MD5Context *ctx) { ctx->buf[0] = 0x67452301; ctx->buf[1] = 0xefcdab89; @@ -61,7 +61,7 @@ MD5Init(struct MD5Context *ctx) * of bytes. */ void -MD5Update(struct MD5Context *ctx, unsigned char const *buf, unsigned len) +cifs_MD5_update(struct MD5Context *ctx, unsigned char const *buf, unsigned len) { register __u32 t; @@ -110,7 +110,7 @@ MD5Update(struct MD5Context *ctx, unsigned char const *buf, unsigned len) * 1 0* (64-bit count of bits processed, MSB-first) */ void -MD5Final(unsigned char digest[16], struct MD5Context *ctx) +cifs_MD5_final(unsigned char digest[16], struct MD5Context *ctx) { unsigned int count; unsigned char *p; @@ -165,7 +165,7 @@ MD5Final(unsigned char digest[16], struct MD5Context *ctx) /* * The core of the MD5 algorithm, this alters an existing MD5 hash to - * reflect the addition of 16 longwords of new data. MD5Update blocks + * reflect the addition of 16 longwords of new data. cifs_MD5_update blocks * the data and converts bytes into longwords for this routine. 
*/ static void @@ -267,9 +267,9 @@ hmac_md5_init_rfc2104(unsigned char *key, int key_len, unsigned char tk[16]; struct MD5Context tctx; - MD5Init(&tctx); - MD5Update(&tctx, key, key_len); - MD5Final(tk, &tctx); + cifs_MD5_init(&tctx); + cifs_MD5_update(&tctx, key, key_len); + cifs_MD5_final(tk, &tctx); key = tk; key_len = 16; @@ -287,8 +287,8 @@ hmac_md5_init_rfc2104(unsigned char *key, int key_len, ctx->k_opad[i] ^= 0x5c; } - MD5Init(&ctx->ctx); - MD5Update(&ctx->ctx, ctx->k_ipad, 64); + cifs_MD5_init(&ctx->ctx); + cifs_MD5_update(&ctx->ctx, ctx->k_ipad, 64); } #endif @@ -317,8 +317,8 @@ hmac_md5_init_limK_to_64(const unsigned char *key, int key_len, ctx->k_opad[i] ^= 0x5c; } - MD5Init(&ctx->ctx); - MD5Update(&ctx->ctx, ctx->k_ipad, 64); + cifs_MD5_init(&ctx->ctx); + cifs_MD5_update(&ctx->ctx, ctx->k_ipad, 64); } /*********************************************************************** @@ -328,7 +328,7 @@ void hmac_md5_update(const unsigned char *text, int text_len, struct HMACMD5Context *ctx) { - MD5Update(&ctx->ctx, text, text_len); /* then text of datagram */ + cifs_MD5_update(&ctx->ctx, text, text_len); /* then text of datagram */ } /*********************************************************************** @@ -339,12 +339,12 @@ hmac_md5_final(unsigned char *digest, struct HMACMD5Context *ctx) { struct MD5Context ctx_o; - MD5Final(digest, &ctx->ctx); + cifs_MD5_final(digest, &ctx->ctx); - MD5Init(&ctx_o); - MD5Update(&ctx_o, ctx->k_opad, 64); - MD5Update(&ctx_o, digest, 16); - MD5Final(digest, &ctx_o); + cifs_MD5_init(&ctx_o); + cifs_MD5_update(&ctx_o, ctx->k_opad, 64); + cifs_MD5_update(&ctx_o, digest, 16); + cifs_MD5_final(digest, &ctx_o); } /*********************************************************** diff --git a/fs/cifs/md5.h b/fs/cifs/md5.h index f7d4f4197bac..6fba8cb402fd 100644 --- a/fs/cifs/md5.h +++ b/fs/cifs/md5.h @@ -20,10 +20,10 @@ struct HMACMD5Context { }; #endif /* _HMAC_MD5_H */ -void MD5Init(struct MD5Context *context); -void MD5Update(struct MD5Context *context, unsigned char const *buf, +void cifs_MD5_init(struct MD5Context *context); +void cifs_MD5_update(struct MD5Context *context, unsigned char const *buf, unsigned len); -void MD5Final(unsigned char digest[16], struct MD5Context *context); +void cifs_MD5_final(unsigned char digest[16], struct MD5Context *context); /* The following definitions come from lib/hmacmd5.c */ diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 7ebe6599ed3a..0ad3e2d116a6 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -154,81 +154,8 @@ void DeleteTconOplockQEntries(struct cifsTconInfo *tcon) spin_unlock(&GlobalMid_Lock); } -int -smb_send(struct socket *ssocket, struct smb_hdr *smb_buffer, - unsigned int smb_buf_length, struct sockaddr *sin, bool noblocksnd) -{ - int rc = 0; - int i = 0; - struct msghdr smb_msg; - struct kvec iov; - unsigned len = smb_buf_length + 4; - - if (ssocket == NULL) - return -ENOTSOCK; /* BB eventually add reconnect code here */ - iov.iov_base = smb_buffer; - iov.iov_len = len; - - smb_msg.msg_name = sin; - smb_msg.msg_namelen = sizeof(struct sockaddr); - smb_msg.msg_control = NULL; - smb_msg.msg_controllen = 0; - if (noblocksnd) - smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL; - else - smb_msg.msg_flags = MSG_NOSIGNAL; - - /* smb header is converted in header_assemble. 
bcc and rest of SMB word - area, and byte area if necessary, is converted to littleendian in - cifssmb.c and RFC1001 len is converted to bigendian in smb_send - Flags2 is converted in SendReceive */ - - smb_buffer->smb_buf_length = cpu_to_be32(smb_buffer->smb_buf_length); - cFYI(1, ("Sending smb of length %d", smb_buf_length)); - dump_smb(smb_buffer, len); - - while (len > 0) { - rc = kernel_sendmsg(ssocket, &smb_msg, &iov, 1, len); - if ((rc == -ENOSPC) || (rc == -EAGAIN)) { - i++; - /* smaller timeout here than send2 since smaller size */ - /* Although it may not be required, this also is smaller - oplock break time */ - if (i > 12) { - cERROR(1, - ("sends on sock %p stuck for 7 seconds", - ssocket)); - rc = -EAGAIN; - break; - } - msleep(1 << i); - continue; - } - if (rc < 0) - break; - else - i = 0; /* reset i after each successful send */ - iov.iov_base += rc; - iov.iov_len -= rc; - len -= rc; - } - - if (rc < 0) { - cERROR(1, ("Error %d sending data on socket to server", rc)); - } else { - rc = 0; - } - - /* Don't want to modify the buffer as a - side effect of this call. */ - smb_buffer->smb_buf_length = smb_buf_length; - - return rc; -} - static int -smb_send2(struct TCP_Server_Info *server, struct kvec *iov, int n_vec, - struct sockaddr *sin, bool noblocksnd) +smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec) { int rc = 0; int i = 0; @@ -243,11 +170,11 @@ smb_send2(struct TCP_Server_Info *server, struct kvec *iov, int n_vec, if (ssocket == NULL) return -ENOTSOCK; /* BB eventually add reconnect code here */ - smb_msg.msg_name = sin; + smb_msg.msg_name = (struct sockaddr *) &server->addr.sockAddr; smb_msg.msg_namelen = sizeof(struct sockaddr); smb_msg.msg_control = NULL; smb_msg.msg_controllen = 0; - if (noblocksnd) + if (server->noblocksnd) smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL; else smb_msg.msg_flags = MSG_NOSIGNAL; @@ -272,7 +199,25 @@ smb_send2(struct TCP_Server_Info *server, struct kvec *iov, int n_vec, n_vec - first_vec, total_len); if ((rc == -ENOSPC) || (rc == -EAGAIN)) { i++; - if (i >= 14) { + /* if blocking send we try 3 times, since each can block + for 5 seconds. For nonblocking we have to try more + but wait increasing amounts of time allowing time for + socket to clear. The overall time we wait in either + case to send on the socket is about 15 seconds. + Similarly we wait for 15 seconds for + a response from the server in SendReceive[2] + for the server to send a response back for + most types of requests (except SMB Write + past end of file which can be slow, and + blocking lock operations). NFS waits slightly longer + than CIFS, but this can make it take longer for + nonresponsive servers to be detected and 15 seconds + is more than enough time for modern networks to + send a packet. In most cases if we fail to send + after the retries we will kill the socket and + reconnect which may clear the network problem. 
+ */ + if ((i >= 14) || (!server->noblocksnd && (i > 2))) { cERROR(1, ("sends on sock %p stuck for 15 seconds", ssocket)); @@ -339,6 +284,18 @@ smb_send2(struct TCP_Server_Info *server, struct kvec *iov, int n_vec, return rc; } +int +smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer, + unsigned int smb_buf_length) +{ + struct kvec iov; + + iov.iov_base = smb_buffer; + iov.iov_len = smb_buf_length + 4; + + return smb_sendv(server, &iov, 1); +} + static int wait_for_free_request(struct cifsSesInfo *ses, const int long_op) { if (long_op == CIFS_ASYNC_OP) { @@ -540,9 +497,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, #ifdef CONFIG_CIFS_STATS2 atomic_inc(&ses->server->inSend); #endif - rc = smb_send2(ses->server, iov, n_vec, - (struct sockaddr *) &(ses->server->addr.sockAddr), - ses->server->noblocksnd); + rc = smb_sendv(ses->server, iov, n_vec); #ifdef CONFIG_CIFS_STATS2 atomic_dec(&ses->server->inSend); midQ->when_sent = jiffies; @@ -736,9 +691,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, #ifdef CONFIG_CIFS_STATS2 atomic_inc(&ses->server->inSend); #endif - rc = smb_send(ses->server->ssocket, in_buf, in_buf->smb_buf_length, - (struct sockaddr *) &(ses->server->addr.sockAddr), - ses->server->noblocksnd); + rc = smb_send(ses->server, in_buf, in_buf->smb_buf_length); #ifdef CONFIG_CIFS_STATS2 atomic_dec(&ses->server->inSend); midQ->when_sent = jiffies; @@ -879,9 +832,7 @@ send_nt_cancel(struct cifsTconInfo *tcon, struct smb_hdr *in_buf, mutex_unlock(&ses->server->srv_mutex); return rc; } - rc = smb_send(ses->server->ssocket, in_buf, in_buf->smb_buf_length, - (struct sockaddr *) &(ses->server->addr.sockAddr), - ses->server->noblocksnd); + rc = smb_send(ses->server, in_buf, in_buf->smb_buf_length); mutex_unlock(&ses->server->srv_mutex); return rc; } @@ -973,9 +924,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, #ifdef CONFIG_CIFS_STATS2 atomic_inc(&ses->server->inSend); #endif - rc = smb_send(ses->server->ssocket, in_buf, in_buf->smb_buf_length, - (struct sockaddr *) &(ses->server->addr.sockAddr), - ses->server->noblocksnd); + rc = smb_send(ses->server, in_buf, in_buf->smb_buf_length); #ifdef CONFIG_CIFS_STATS2 atomic_dec(&ses->server->inSend); midQ->when_sent = jiffies; diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index 5235c67e7594..c8f8d5904f5e 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -538,6 +538,7 @@ static int dev_ifsioc(unsigned int fd, unsigned int cmd, unsigned long arg) * cannot be fixed without breaking all existing apps. 
*/ case TUNSETIFF: + case TUNGETIFF: case SIOCGIFFLAGS: case SIOCGIFMETRIC: case SIOCGIFMTU: @@ -1982,6 +1983,11 @@ COMPATIBLE_IOCTL(TUNSETNOCSUM) COMPATIBLE_IOCTL(TUNSETDEBUG) COMPATIBLE_IOCTL(TUNSETPERSIST) COMPATIBLE_IOCTL(TUNSETOWNER) +COMPATIBLE_IOCTL(TUNSETLINK) +COMPATIBLE_IOCTL(TUNSETGROUP) +COMPATIBLE_IOCTL(TUNGETFEATURES) +COMPATIBLE_IOCTL(TUNSETOFFLOAD) +COMPATIBLE_IOCTL(TUNSETTXFILTER) /* Big V */ COMPATIBLE_IOCTL(VT_SETMODE) COMPATIBLE_IOCTL(VT_GETMODE) @@ -2573,6 +2579,7 @@ HANDLE_IOCTL(SIOCGIFPFLAGS, dev_ifsioc) HANDLE_IOCTL(SIOCGIFTXQLEN, dev_ifsioc) HANDLE_IOCTL(SIOCSIFTXQLEN, dev_ifsioc) HANDLE_IOCTL(TUNSETIFF, dev_ifsioc) +HANDLE_IOCTL(TUNGETIFF, dev_ifsioc) HANDLE_IOCTL(SIOCETHTOOL, ethtool_ioctl) HANDLE_IOCTL(SIOCBONDENSLAVE, bond_ioctl) HANDLE_IOCTL(SIOCBONDRELEASE, bond_ioctl) diff --git a/fs/eventpoll.c b/fs/eventpoll.c index ba2f9ec71192..011b9b8c90c6 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -234,8 +234,6 @@ struct ep_pqueue { /* * Configuration options available inside /proc/sys/fs/epoll/ */ -/* Maximum number of epoll devices, per user */ -static int max_user_instances __read_mostly; /* Maximum number of epoll watched descriptors, per user */ static int max_user_watches __read_mostly; @@ -261,14 +259,6 @@ static int zero; ctl_table epoll_table[] = { { - .procname = "max_user_instances", - .data = &max_user_instances, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec_minmax, - .extra1 = &zero, - }, - { .procname = "max_user_watches", .data = &max_user_watches, .maxlen = sizeof(int), @@ -491,7 +481,6 @@ static void ep_free(struct eventpoll *ep) mutex_unlock(&epmutex); mutex_destroy(&ep->mtx); - atomic_dec(&ep->user->epoll_devs); free_uid(ep->user); kfree(ep); } @@ -581,10 +570,6 @@ static int ep_alloc(struct eventpoll **pep) struct eventpoll *ep; user = get_current_user(); - error = -EMFILE; - if (unlikely(atomic_read(&user->epoll_devs) >= - max_user_instances)) - goto free_uid; error = -ENOMEM; ep = kzalloc(sizeof(*ep), GFP_KERNEL); if (unlikely(!ep)) @@ -1141,7 +1126,6 @@ SYSCALL_DEFINE1(epoll_create1, int, flags) flags & O_CLOEXEC); if (fd < 0) ep_free(ep); - atomic_inc(&ep->user->epoll_devs); error_return: DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n", @@ -1366,8 +1350,10 @@ static int __init eventpoll_init(void) struct sysinfo si; si_meminfo(&si); - max_user_instances = 128; - max_user_watches = (((si.totalram - si.totalhigh) / 32) << PAGE_SHIFT) / + /* + * Allows top 4% of lomem to be allocated for epoll watches (per user). + */ + max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) / EP_ITEM_COST; /* Initialize the structure used to perform safe poll wait head wake ups */ diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 12e9a2957caf..2124c063a7ef 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -41,6 +41,7 @@ header-y += baycom.h header-y += bfs_fs.h header-y += blkpg.h header-y += bpqether.h +header-y += bsg.h header-y += can.h header-y += cdk.h header-y += chio.h diff --git a/include/linux/bio.h b/include/linux/bio.h index 18462c5b8fff..0942765cf8c0 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -144,7 +144,7 @@ struct bio { * bit 1 -- rw-ahead when set * bit 2 -- barrier * Insert a serialization point in the IO queue, forcing previously - * submitted IO to be completed before this oen is issued. + * submitted IO to be completed before this one is issued. 
* bit 3 -- synchronous I/O hint: the block layer will unplug immediately * Note that this does NOT indicate that the IO itself is sync, just * that the block layer will not postpone issue of this IO by plugging. @@ -163,12 +163,33 @@ struct bio { #define BIO_RW 0 /* Must match RW in req flags (blkdev.h) */ #define BIO_RW_AHEAD 1 /* Must match FAILFAST in req flags */ #define BIO_RW_BARRIER 2 -#define BIO_RW_SYNC 3 -#define BIO_RW_META 4 -#define BIO_RW_DISCARD 5 -#define BIO_RW_FAILFAST_DEV 6 -#define BIO_RW_FAILFAST_TRANSPORT 7 -#define BIO_RW_FAILFAST_DRIVER 8 +#define BIO_RW_SYNCIO 3 +#define BIO_RW_UNPLUG 4 +#define BIO_RW_META 5 +#define BIO_RW_DISCARD 6 +#define BIO_RW_FAILFAST_DEV 7 +#define BIO_RW_FAILFAST_TRANSPORT 8 +#define BIO_RW_FAILFAST_DRIVER 9 + +#define BIO_RW_SYNC (BIO_RW_SYNCIO | BIO_RW_UNPLUG) + +#define bio_rw_flagged(bio, flag) ((bio)->bi_rw & (1 << (flag))) + +/* + * Old defines, these should eventually be replaced by direct usage of + * bio_rw_flagged() + */ +#define bio_barrier(bio) bio_rw_flagged(bio, BIO_RW_BARRIER) +#define bio_sync(bio) bio_rw_flagged(bio, BIO_RW_SYNCIO) +#define bio_unplug(bio) bio_rw_flagged(bio, BIO_RW_UNPLUG) +#define bio_failfast_dev(bio) bio_rw_flagged(bio, BIO_RW_FAILFAST_DEV) +#define bio_failfast_transport(bio) \ + bio_rw_flagged(bio, BIO_RW_FAILFAST_TRANSPORT) +#define bio_failfast_driver(bio) \ + bio_rw_flagged(bio, BIO_RW_FAILFAST_DRIVER) +#define bio_rw_ahead(bio) bio_rw_flagged(bio, BIO_RW_AHEAD) +#define bio_rw_meta(bio) bio_rw_flagged(bio, BIO_RW_META) +#define bio_discard(bio) bio_rw_flagged(bio, BIO_RW_DISCARD) /* * upper 16 bits of bi_rw define the io priority of this bio @@ -193,15 +214,6 @@ struct bio { #define bio_offset(bio) bio_iovec((bio))->bv_offset #define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx) #define bio_sectors(bio) ((bio)->bi_size >> 9) -#define bio_barrier(bio) ((bio)->bi_rw & (1 << BIO_RW_BARRIER)) -#define bio_sync(bio) ((bio)->bi_rw & (1 << BIO_RW_SYNC)) -#define bio_failfast_dev(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST_DEV)) -#define bio_failfast_transport(bio) \ - ((bio)->bi_rw & (1 << BIO_RW_FAILFAST_TRANSPORT)) -#define bio_failfast_driver(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST_DRIVER)) -#define bio_rw_ahead(bio) ((bio)->bi_rw & (1 << BIO_RW_AHEAD)) -#define bio_rw_meta(bio) ((bio)->bi_rw & (1 << BIO_RW_META)) -#define bio_discard(bio) ((bio)->bi_rw & (1 << BIO_RW_DISCARD)) #define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio)) static inline unsigned int bio_cur_sectors(struct bio *bio) @@ -312,7 +324,6 @@ struct bio_integrity_payload { void *bip_buf; /* generated integrity data */ bio_end_io_t *bip_end_io; /* saved I/O completion fn */ - int bip_error; /* saved I/O error */ unsigned int bip_size; unsigned short bip_pool; /* pool the ivec came from */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 044467ef7b11..d08c4b8219a6 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -108,6 +108,7 @@ enum rq_flag_bits { __REQ_RW_META, /* metadata io request */ __REQ_COPY_USER, /* contains copies of user pages */ __REQ_INTEGRITY, /* integrity metadata has been remapped */ + __REQ_UNPLUG, /* unplug queue on submission */ __REQ_NR_BITS, /* stops here */ }; @@ -134,6 +135,7 @@ enum rq_flag_bits { #define REQ_RW_META (1 << __REQ_RW_META) #define REQ_COPY_USER (1 << __REQ_COPY_USER) #define REQ_INTEGRITY (1 << __REQ_INTEGRITY) +#define REQ_UNPLUG (1 << __REQ_UNPLUG) #define BLK_MAX_CDB 16 @@ -449,6 +451,11 @@ struct request_queue 
#define QUEUE_FLAG_STACKABLE 13 /* supports request stacking */ #define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */ #define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ +#define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ + +#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ + (1 << QUEUE_FLAG_CLUSTER) | \ + 1 << QUEUE_FLAG_STACKABLE) static inline int queue_is_locked(struct request_queue *q) { @@ -565,6 +572,7 @@ enum { #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) +#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) #define blk_queue_flushing(q) ((q)->ordseq) #define blk_queue_stackable(q) \ test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index e267e62827bb..e4e8e117d27d 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -99,6 +99,7 @@ static inline bool css_tryget(struct cgroup_subsys_state *css) while (!atomic_inc_not_zero(&css->refcnt)) { if (test_bit(CSS_REMOVED, &css->flags)) return false; + cpu_relax(); } return true; } diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index d6ea19e314bb..32851eef48f0 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -49,6 +49,13 @@ /* Attach to insert probes on any functions which should be ignored*/ #define __kprobes __attribute__((__section__(".kprobes.text"))) notrace +#else /* CONFIG_KPROBES */ +typedef int kprobe_opcode_t; +struct arch_specific_insn { + int dummy; +}; +#define __kprobes notrace +#endif /* CONFIG_KPROBES */ struct kprobe; struct pt_regs; @@ -131,23 +138,6 @@ struct jprobe { /* For backward compatibility with old code using JPROBE_ENTRY() */ #define JPROBE_ENTRY(handler) (handler) -DECLARE_PER_CPU(struct kprobe *, current_kprobe); -DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); - -#ifdef CONFIG_KRETPROBES -extern void arch_prepare_kretprobe(struct kretprobe_instance *ri, - struct pt_regs *regs); -extern int arch_trampoline_kprobe(struct kprobe *p); -#else /* CONFIG_KRETPROBES */ -static inline void arch_prepare_kretprobe(struct kretprobe *rp, - struct pt_regs *regs) -{ -} -static inline int arch_trampoline_kprobe(struct kprobe *p) -{ - return 0; -} -#endif /* CONFIG_KRETPROBES */ /* * Function-return probe - * Note: @@ -188,6 +178,25 @@ struct kprobe_blackpoint { unsigned long range; }; +#ifdef CONFIG_KPROBES +DECLARE_PER_CPU(struct kprobe *, current_kprobe); +DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); + +#ifdef CONFIG_KRETPROBES +extern void arch_prepare_kretprobe(struct kretprobe_instance *ri, + struct pt_regs *regs); +extern int arch_trampoline_kprobe(struct kprobe *p); +#else /* CONFIG_KRETPROBES */ +static inline void arch_prepare_kretprobe(struct kretprobe *rp, + struct pt_regs *regs) +{ +} +static inline int arch_trampoline_kprobe(struct kprobe *p) +{ + return 0; +} +#endif /* CONFIG_KRETPROBES */ + extern struct kretprobe_blackpoint kretprobe_blacklist[]; static inline void kretprobe_assert(struct kretprobe_instance *ri, @@ -264,10 +273,6 @@ void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head); #else /* CONFIG_KPROBES */ -#define __kprobes notrace -struct jprobe; -struct kretprobe; - static inline struct kprobe *get_kprobe(void *addr) { return NULL; diff --git a/include/linux/sched.h b/include/linux/sched.h index 
02e16d207304..5a7c76388731 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -630,7 +630,6 @@ struct user_struct { atomic_t inotify_devs; /* How many inotify devs does this user have opened? */ #endif #ifdef CONFIG_EPOLL - atomic_t epoll_devs; /* The number of epoll descriptors currently open */ atomic_t epoll_watches; /* The number of file descriptors currently watched */ #endif #ifdef CONFIG_POSIX_MQUEUE diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 16875f89e6a7..0eda02ff2414 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -108,9 +108,14 @@ struct old_linux_dirent; asm ("\t.globl " #alias "\n\t.set " #alias ", " #name "\n" \ "\t.globl ." #alias "\n\t.set ." #alias ", ." #name) #else +#ifdef CONFIG_ALPHA +#define SYSCALL_ALIAS(alias, name) \ + asm ( #alias " = " #name "\n\t.globl " #alias) +#else #define SYSCALL_ALIAS(alias, name) \ asm ("\t.globl " #alias "\n\t.set " #alias ", " #name) #endif +#endif #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS diff --git a/include/media/videobuf-dma-sg.h b/include/media/videobuf-dma-sg.h index 90edd22d343c..dda47f0082e9 100644 --- a/include/media/videobuf-dma-sg.h +++ b/include/media/videobuf-dma-sg.h @@ -49,7 +49,7 @@ struct scatterlist* videobuf_pages_to_sg(struct page **pages, int nr_pages, * does memory allocation too using vmalloc_32(). * * videobuf_dma_*() - * see Documentation/DMA-mapping.txt, these functions to + * see Documentation/PCI/PCI-DMA-mapping.txt, these functions to * basically the same. The map function does also build a * scatterlist for the buffer (and unmap frees it ...) * diff --git a/include/mtd/ubi-user.h b/include/mtd/ubi-user.h index 2dc2eb2b8e22..296efae3525e 100644 --- a/include/mtd/ubi-user.h +++ b/include/mtd/ubi-user.h @@ -40,37 +40,37 @@ * UBI volume creation * ~~~~~~~~~~~~~~~~~~~ * - * UBI volumes are created via the %UBI_IOCMKVOL IOCTL command of UBI character + * UBI volumes are created via the %UBI_IOCMKVOL ioctl command of UBI character * device. A &struct ubi_mkvol_req object has to be properly filled and a - * pointer to it has to be passed to the IOCTL. + * pointer to it has to be passed to the ioctl. * * UBI volume deletion * ~~~~~~~~~~~~~~~~~~~ * - * To delete a volume, the %UBI_IOCRMVOL IOCTL command of the UBI character + * To delete a volume, the %UBI_IOCRMVOL ioctl command of the UBI character * device should be used. A pointer to the 32-bit volume ID hast to be passed - * to the IOCTL. + * to the ioctl. * * UBI volume re-size * ~~~~~~~~~~~~~~~~~~ * - * To re-size a volume, the %UBI_IOCRSVOL IOCTL command of the UBI character + * To re-size a volume, the %UBI_IOCRSVOL ioctl command of the UBI character * device should be used. A &struct ubi_rsvol_req object has to be properly - * filled and a pointer to it has to be passed to the IOCTL. + * filled and a pointer to it has to be passed to the ioctl. * * UBI volumes re-name * ~~~~~~~~~~~~~~~~~~~ * * To re-name several volumes atomically at one go, the %UBI_IOCRNVOL command * of the UBI character device should be used. A &struct ubi_rnvol_req object - * has to be properly filled and a pointer to it has to be passed to the IOCTL. + * has to be properly filled and a pointer to it has to be passed to the ioctl. * * UBI volume update * ~~~~~~~~~~~~~~~~~ * - * Volume update should be done via the %UBI_IOCVOLUP IOCTL command of the + * Volume update should be done via the %UBI_IOCVOLUP ioctl command of the * corresponding UBI volume character device. 
A pointer to a 64-bit update - * size should be passed to the IOCTL. After this, UBI expects user to write + * size should be passed to the ioctl. After this, UBI expects user to write * this number of bytes to the volume character device. The update is finished * when the claimed number of bytes is passed. So, the volume update sequence * is something like: @@ -80,14 +80,58 @@ * write(fd, buf, image_size); * close(fd); * - * Atomic eraseblock change + * Logical eraseblock erase * ~~~~~~~~~~~~~~~~~~~~~~~~ * - * Atomic eraseblock change operation is done via the %UBI_IOCEBCH IOCTL - * command of the corresponding UBI volume character device. A pointer to - * &struct ubi_leb_change_req has to be passed to the IOCTL. Then the user is - * expected to write the requested amount of bytes. This is similar to the - * "volume update" IOCTL. + * To erase a logical eraseblock, the %UBI_IOCEBER ioctl command of the + * corresponding UBI volume character device should be used. This command + * unmaps the requested logical eraseblock, makes sure the corresponding + * physical eraseblock is successfully erased, and returns. + * + * Atomic logical eraseblock change + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * Atomic logical eraseblock change operation is called using the %UBI_IOCEBCH + * ioctl command of the corresponding UBI volume character device. A pointer to + * a &struct ubi_leb_change_req object has to be passed to the ioctl. Then the + * user is expected to write the requested amount of bytes (similarly to what + * should be done in case of the "volume update" ioctl). + * + * Logical eraseblock map + * ~~~~~~~~~~~~~~~~~~~~~ + * + * To map a logical eraseblock to a physical eraseblock, the %UBI_IOCEBMAP + * ioctl command should be used. A pointer to a &struct ubi_map_req object is + * expected to be passed. The ioctl maps the requested logical eraseblock to + * a physical eraseblock and returns. Only non-mapped logical eraseblocks can + * be mapped. If the logical eraseblock specified in the request is already + * mapped to a physical eraseblock, the ioctl fails and returns error. + * + * Logical eraseblock unmap + * ~~~~~~~~~~~~~~~~~~~~~~~~ + * + * To unmap a logical eraseblock to a physical eraseblock, the %UBI_IOCEBUNMAP + * ioctl command should be used. The ioctl unmaps the logical eraseblocks, + * schedules corresponding physical eraseblock for erasure, and returns. Unlike + * the "LEB erase" command, it does not wait for the physical eraseblock being + * erased. Note, the side effect of this is that if an unclean reboot happens + * after the unmap ioctl returns, you may find the LEB mapped again to the same + * physical eraseblock after the UBI is run again. + * + * Check if logical eraseblock is mapped + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * To check if a logical eraseblock is mapped to a physical eraseblock, the + * %UBI_IOCEBISMAP ioctl command should be used. It returns %0 if the LEB is + * not mapped, and %1 if it is mapped. + * + * Set an UBI volume property + * ~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * To set an UBI volume property the %UBI_IOCSETPROP ioctl command should be + * used. A pointer to a &struct ubi_set_prop_req object is expected to be + * passed. The object describes which property should be set, and to which value + * it should be set. 
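To make the new per-LEB ioctls described above concrete, here is a minimal userspace sketch. It assumes a kernel carrying this patch, an existing UBI volume node at /dev/ubi0_0 (a placeholder path), and that both commands take a pointer to the 32-bit LEB number, with UBI_IOCEBISMAP reporting the mapped state through its return value as documented above; treat those calling-convention details as assumptions, not a definitive reference.

/*
 * Userspace sketch: check whether LEB 0 is mapped, then unmap it.
 * /dev/ubi0_0 and the LEB number are placeholders.
 */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/ubi-user.h>

int main(void)
{
        int fd = open("/dev/ubi0_0", O_RDWR);
        if (fd < 0) {
                perror("open /dev/ubi0_0");
                return 1;
        }

        int32_t lnum = 0;                       /* LEB to inspect (assumed) */

        int mapped = ioctl(fd, UBI_IOCEBISMAP, &lnum);
        if (mapped < 0) {
                perror("UBI_IOCEBISMAP");
        } else {
                printf("LEB %d is %smapped\n", lnum, mapped ? "" : "not ");

                /*
                 * Unmapping only schedules the physical eraseblock for
                 * erasure, so after an unclean reboot the LEB may show up
                 * as mapped again (see the note above).
                 */
                if (mapped && ioctl(fd, UBI_IOCEBUNMAP, &lnum) < 0)
                        perror("UBI_IOCEBUNMAP");
        }

        close(fd);
        return 0;
}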
*/ /* @@ -101,7 +145,7 @@ /* Maximum volume name length */ #define UBI_MAX_VOLUME_NAME 127 -/* IOCTL commands of UBI character devices */ +/* ioctl commands of UBI character devices */ #define UBI_IOC_MAGIC 'o' @@ -114,7 +158,7 @@ /* Re-name volumes */ #define UBI_IOCRNVOL _IOW(UBI_IOC_MAGIC, 3, struct ubi_rnvol_req) -/* IOCTL commands of the UBI control character device */ +/* ioctl commands of the UBI control character device */ #define UBI_CTRL_IOC_MAGIC 'o' @@ -123,16 +167,24 @@ /* Detach an MTD device */ #define UBI_IOCDET _IOW(UBI_CTRL_IOC_MAGIC, 65, int32_t) -/* IOCTL commands of UBI volume character devices */ +/* ioctl commands of UBI volume character devices */ #define UBI_VOL_IOC_MAGIC 'O' /* Start UBI volume update */ #define UBI_IOCVOLUP _IOW(UBI_VOL_IOC_MAGIC, 0, int64_t) -/* An eraseblock erasure command, used for debugging, disabled by default */ +/* LEB erasure command, used for debugging, disabled by default */ #define UBI_IOCEBER _IOW(UBI_VOL_IOC_MAGIC, 1, int32_t) -/* An atomic eraseblock change command */ +/* Atomic LEB change command */ #define UBI_IOCEBCH _IOW(UBI_VOL_IOC_MAGIC, 2, int32_t) +/* Map LEB command */ +#define UBI_IOCEBMAP _IOW(UBI_VOL_IOC_MAGIC, 3, struct ubi_map_req) +/* Unmap LEB command */ +#define UBI_IOCEBUNMAP _IOW(UBI_VOL_IOC_MAGIC, 4, int32_t) +/* Check if LEB is mapped command */ +#define UBI_IOCEBISMAP _IOR(UBI_VOL_IOC_MAGIC, 5, int32_t) +/* Set an UBI volume property */ +#define UBI_IOCSETPROP _IOW(UBI_VOL_IOC_MAGIC, 6, struct ubi_set_prop_req) /* Maximum MTD device name length supported by UBI */ #define MAX_UBI_MTD_NAME_LEN 127 @@ -168,6 +220,16 @@ enum { UBI_STATIC_VOLUME = 4, }; +/* + * UBI set property ioctl constants + * + * @UBI_PROP_DIRECT_WRITE: allow / disallow user to directly write and + * erase individual eraseblocks on dynamic volumes + */ +enum { + UBI_PROP_DIRECT_WRITE = 1, +}; + /** * struct ubi_attach_req - attach MTD device request. * @ubi_num: UBI device number to create @@ -305,8 +367,8 @@ struct ubi_rnvol_req { } __attribute__ ((packed)); /** - * struct ubi_leb_change_req - a data structure used in atomic logical - * eraseblock change requests. + * struct ubi_leb_change_req - a data structure used in atomic LEB change + * requests. * @lnum: logical eraseblock number to change * @bytes: how many bytes will be written to the logical eraseblock * @dtype: data type (%UBI_LONGTERM, %UBI_SHORTTERM, %UBI_UNKNOWN) @@ -319,4 +381,30 @@ struct ubi_leb_change_req { int8_t padding[7]; } __attribute__ ((packed)); +/** + * struct ubi_map_req - a data structure used in map LEB requests. + * @lnum: logical eraseblock number to unmap + * @dtype: data type (%UBI_LONGTERM, %UBI_SHORTTERM, %UBI_UNKNOWN) + * @padding: reserved for future, not used, has to be zeroed + */ +struct ubi_map_req { + int32_t lnum; + int8_t dtype; + int8_t padding[3]; +} __attribute__ ((packed)); + + +/** + * struct ubi_set_prop_req - a data structure used to set an ubi volume + * property. 
+ * @property: property to set (%UBI_PROP_DIRECT_WRITE) + * @padding: reserved for future, not used, has to be zeroed + * @value: value to set + */ +struct ubi_set_prop_req { + uint8_t property; + uint8_t padding[7]; + uint64_t value; +} __attribute__ ((packed)); + #endif /* __UBI_USER_H__ */ diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index f44bb5c77a70..d0a043153cc6 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -182,7 +182,7 @@ static inline int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo) size = 2048; if (nr_pcpus >= 32) size = 4096; - if (sizeof(rwlock_t) != 0) { + if (sizeof(spinlock_t) != 0) { #ifdef CONFIG_NUMA if (size * sizeof(spinlock_t) > PAGE_SIZE) hashinfo->ehash_locks = vmalloc(size * sizeof(spinlock_t)); diff --git a/kernel/cgroup.c b/kernel/cgroup.c index c29831076e7a..5a54ff42874e 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1115,8 +1115,10 @@ static void cgroup_kill_sb(struct super_block *sb) { } write_unlock(&css_set_lock); - list_del(&root->root_list); - root_count--; + if (!list_empty(&root->root_list)) { + list_del(&root->root_list); + root_count--; + } mutex_unlock(&cgroup_mutex); @@ -2434,7 +2436,9 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, err_remove: + cgroup_lock_hierarchy(root); list_del(&cgrp->sibling); + cgroup_unlock_hierarchy(root); root->number_of_cgroups--; err_destroy: @@ -2507,7 +2511,7 @@ static int cgroup_clear_css_refs(struct cgroup *cgrp) for_each_subsys(cgrp->root, ss) { struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id]; int refcnt; - do { + while (1) { /* We can only remove a CSS with a refcnt==1 */ refcnt = atomic_read(&css->refcnt); if (refcnt > 1) { @@ -2521,7 +2525,10 @@ static int cgroup_clear_css_refs(struct cgroup *cgrp) * css_tryget() to spin until we set the * CSS_REMOVED bits or abort */ - } while (atomic_cmpxchg(&css->refcnt, refcnt, 0) != refcnt); + if (atomic_cmpxchg(&css->refcnt, refcnt, 0) == refcnt) + break; + cpu_relax(); + } } done: for_each_subsys(cgrp->root, ss) { @@ -2991,20 +2998,21 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys, mutex_unlock(&cgroup_mutex); return 0; } - task_lock(tsk); - cg = tsk->cgroups; - parent = task_cgroup(tsk, subsys->subsys_id); /* Pin the hierarchy */ - if (!atomic_inc_not_zero(&parent->root->sb->s_active)) { + if (!atomic_inc_not_zero(&root->sb->s_active)) { /* We race with the final deactivate_super() */ mutex_unlock(&cgroup_mutex); return 0; } /* Keep the cgroup alive */ + task_lock(tsk); + parent = task_cgroup(tsk, subsys->subsys_id); + cg = tsk->cgroups; get_css_set(cg); task_unlock(tsk); + mutex_unlock(&cgroup_mutex); /* Now do the VFS work to create a cgroup */ @@ -3043,7 +3051,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys, mutex_unlock(&inode->i_mutex); put_css_set(cg); - deactivate_super(parent->root->sb); + deactivate_super(root->sb); /* The cgroup is still accessible in the VFS, but * we're not going to try to rmdir() it at this * point. 
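The cgroup hunks above (css_tryget() gaining a cpu_relax(), and cgroup_clear_css_refs() spinning until its cmpxchg succeeds) share one pattern: retry an atomic update and relax the CPU between attempts. Below is a userspace model of that pattern only, with C11 atomics standing in for the kernel atomic_t API and __builtin_ia32_pause() approximating cpu_relax(); the refcount semantics are reduced to the bare minimum.

/*
 * Userspace model of the "retry with cpu_relax()" pattern from the
 * cgroup hunks above. Not kernel code.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int refcnt = 1;
static atomic_bool removed = false;

static inline void cpu_relax_model(void)
{
#if defined(__x86_64__) || defined(__i386__)
        __builtin_ia32_pause();                 /* rough cpu_relax() stand-in */
#else
        atomic_signal_fence(memory_order_seq_cst);
#endif
}

/* Like atomic_inc_not_zero(): bump the count unless it already hit zero. */
static bool inc_not_zero(atomic_int *v)
{
        int old = atomic_load(v);
        do {
                if (old == 0)
                        return false;
        } while (!atomic_compare_exchange_weak(v, &old, old + 1));
        return true;
}

/*
 * Mirrors the patched css_tryget(): keep retrying until the increment
 * succeeds or the object is marked removed, relaxing between attempts.
 */
static bool tryget(void)
{
        while (!inc_not_zero(&refcnt)) {
                if (atomic_load(&removed))
                        return false;
                cpu_relax_model();              /* the line the patch adds */
        }
        return true;
}

int main(void)
{
        printf("tryget: %s, refcnt now %d\n",
               tryget() ? "ok" : "failed", atomic_load(&refcnt));
        return 0;
}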
*/ @@ -3069,7 +3077,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys, mutex_lock(&cgroup_mutex); put_css_set(cg); mutex_unlock(&cgroup_mutex); - deactivate_super(parent->root->sb); + deactivate_super(root->sb); return ret; } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 4d0ea3ceba6d..8e4be9cb2a6a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -202,6 +202,7 @@ pcg_default_flags[NR_CHARGE_TYPE] = { static void mem_cgroup_get(struct mem_cgroup *mem); static void mem_cgroup_put(struct mem_cgroup *mem); +static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem); static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, struct page_cgroup *pc, @@ -1684,7 +1685,7 @@ move_account: /* This is for making all *used* pages to be on LRU. */ lru_add_drain_all(); ret = 0; - for_each_node_state(node, N_POSSIBLE) { + for_each_node_state(node, N_HIGH_MEMORY) { for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) { enum lru_list l; for_each_lru(l) { @@ -2193,10 +2194,23 @@ static void mem_cgroup_get(struct mem_cgroup *mem) static void mem_cgroup_put(struct mem_cgroup *mem) { - if (atomic_dec_and_test(&mem->refcnt)) + if (atomic_dec_and_test(&mem->refcnt)) { + struct mem_cgroup *parent = parent_mem_cgroup(mem); __mem_cgroup_free(mem); + if (parent) + mem_cgroup_put(parent); + } } +/* + * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled. + */ +static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem) +{ + if (!mem->res.parent) + return NULL; + return mem_cgroup_from_res_counter(mem->res.parent, res); +} #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP static void __init enable_swap_cgroup(void) @@ -2235,6 +2249,13 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) if (parent && parent->use_hierarchy) { res_counter_init(&mem->res, &parent->res); res_counter_init(&mem->memsw, &parent->memsw); + /* + * We increment refcnt of the parent to ensure that we can + * safely access it on res_counter_charge/uncharge. + * This refcnt will be decremented when freeing this + * mem_cgroup(see mem_cgroup_put). + */ + mem_cgroup_get(parent); } else { res_counter_init(&mem->res, NULL); res_counter_init(&mem->memsw, NULL); diff --git a/mm/mmap.c b/mm/mmap.c index 8d95902e9a38..d3fa10a726cf 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1134,16 +1134,11 @@ munmap_back: } /* - * Can we just expand an old private anonymous mapping? - * The VM_SHARED test is necessary because shmem_zero_setup - * will create the file object for a shared anonymous map below. + * Can we just expand an old mapping? 
*/ - if (!file && !(vm_flags & VM_SHARED)) { - vma = vma_merge(mm, prev, addr, addr + len, vm_flags, - NULL, NULL, pgoff, NULL); - if (vma) - goto out; - } + vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL); + if (vma) + goto out; /* * Determine the object being mapped and call the appropriate @@ -1206,17 +1201,8 @@ munmap_back: if (vma_wants_writenotify(vma)) vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED); - if (file && vma_merge(mm, prev, addr, vma->vm_end, - vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) { - mpol_put(vma_policy(vma)); - kmem_cache_free(vm_area_cachep, vma); - fput(file); - if (vm_flags & VM_EXECUTABLE) - removed_exe_file_vma(mm); - } else { - vma_link(mm, vma, prev, rb_link, rb_parent); - file = vma->vm_file; - } + vma_link(mm, vma, prev, rb_link, rb_parent); + file = vma->vm_file; /* Once vma denies write, undo our temporary denial count */ if (correct_wcount) diff --git a/mm/swapfile.c b/mm/swapfile.c index f48b831e5e5c..7e6304dfafab 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -698,8 +698,10 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, pte_t *pte; int ret = 1; - if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr)) + if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr)) { ret = -ENOMEM; + goto out_nolock; + } pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) { @@ -723,6 +725,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, activate_page(page); out: pte_unmap_unlock(pte, ptl); +out_nolock: return ret; } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 2e5f2ca3bdcd..da74b844f4ea 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2212,10 +2212,10 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data, return 0; next_skb: - block_limit = skb_headlen(st->cur_skb); + block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; if (abs_offset < block_limit) { - *data = st->cur_skb->data + abs_offset; + *data = st->cur_skb->data + (abs_offset - st->stepped_offset); return block_limit - abs_offset; } @@ -2250,13 +2250,14 @@ next_skb: st->frag_data = NULL; } - if (st->cur_skb->next) { - st->cur_skb = st->cur_skb->next; + if (st->root_skb == st->cur_skb && + skb_shinfo(st->root_skb)->frag_list) { + st->cur_skb = skb_shinfo(st->root_skb)->frag_list; st->frag_idx = 0; goto next_skb; - } else if (st->root_skb == st->cur_skb && - skb_shinfo(st->root_skb)->frag_list) { - st->cur_skb = skb_shinfo(st->root_skb)->frag_list; + } else if (st->cur_skb->next) { + st->cur_skb = st->cur_skb->next; + st->frag_idx = 0; goto next_skb; } diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 42a0f3dd3fd6..d722013c1cae 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -1268,6 +1268,9 @@ __be32 __init root_nfs_parse_addr(char *name) static int __init ip_auto_config(void) { __be32 addr; +#ifdef IPCONFIG_DYNAMIC + int retries = CONF_OPEN_RETRIES; +#endif #ifdef CONFIG_PROC_FS proc_net_fops_create(&init_net, "pnp", S_IRUGO, &pnp_seq_fops); @@ -1304,9 +1307,6 @@ static int __init ip_auto_config(void) #endif ic_first_dev->next) { #ifdef IPCONFIG_DYNAMIC - - int retries = CONF_OPEN_RETRIES; - if (ic_dynamic() < 0) { ic_close_devs(); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 0cd71b84e483..76b148bcb0dc 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -524,7 +524,8 @@ static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, struct 
tcp_splice_state *tss = rd_desc->arg.data; int ret; - ret = skb_splice_bits(skb, offset, tss->pipe, rd_desc->count, tss->flags); + ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len), + tss->flags); if (ret > 0) rd_desc->count -= ret; return ret; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index cf5ab0581eba..b7faffe5c029 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -120,8 +120,11 @@ EXPORT_SYMBOL(sysctl_udp_wmem_min); atomic_t udp_memory_allocated; EXPORT_SYMBOL(udp_memory_allocated); +#define PORTS_PER_CHAIN (65536 / UDP_HTABLE_SIZE) + static int udp_lib_lport_inuse(struct net *net, __u16 num, const struct udp_hslot *hslot, + unsigned long *bitmap, struct sock *sk, int (*saddr_comp)(const struct sock *sk1, const struct sock *sk2)) @@ -132,12 +135,17 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num, sk_nulls_for_each(sk2, node, &hslot->head) if (net_eq(sock_net(sk2), net) && sk2 != sk && - sk2->sk_hash == num && + (bitmap || sk2->sk_hash == num) && (!sk2->sk_reuse || !sk->sk_reuse) && (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && - (*saddr_comp)(sk, sk2)) - return 1; + (*saddr_comp)(sk, sk2)) { + if (bitmap) + __set_bit(sk2->sk_hash / UDP_HTABLE_SIZE, + bitmap); + else + return 1; + } return 0; } @@ -160,32 +168,47 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum, if (!snum) { int low, high, remaining; unsigned rand; - unsigned short first; + unsigned short first, last; + DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN); inet_get_local_port_range(&low, &high); remaining = (high - low) + 1; rand = net_random(); - snum = first = rand % remaining + low; - rand |= 1; - for (;;) { - hslot = &udptable->hash[udp_hashfn(net, snum)]; + first = (((u64)rand * remaining) >> 32) + low; + /* + * force rand to be an odd multiple of UDP_HTABLE_SIZE + */ + rand = (rand | 1) * UDP_HTABLE_SIZE; + for (last = first + UDP_HTABLE_SIZE; first != last; first++) { + hslot = &udptable->hash[udp_hashfn(net, first)]; + bitmap_zero(bitmap, PORTS_PER_CHAIN); spin_lock_bh(&hslot->lock); - if (!udp_lib_lport_inuse(net, snum, hslot, sk, saddr_comp)) - break; - spin_unlock_bh(&hslot->lock); + udp_lib_lport_inuse(net, snum, hslot, bitmap, sk, + saddr_comp); + + snum = first; + /* + * Iterate on all possible values of snum for this hash. + * Using steps of an odd multiple of UDP_HTABLE_SIZE + * give us randomization and full range coverage. 
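The rewritten UDP port search above is easier to follow outside the kernel. In the standalone model below a plain array stands in for the socket hash table, rand() for net_random(), and the per-chain locking is dropped; the stepping rule is the same as in the hunk: start from a random port, then advance by an odd multiple of UDP_HTABLE_SIZE so every port hashing to the current chain is visited exactly once before moving on to the next chain.

/*
 * Standalone model of the randomized port search above. Not kernel code;
 * port_in_use[] is a stand-in for the bound-socket hash chains.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>

#define UDP_HTABLE_SIZE 128
#define PORTS_PER_CHAIN (65536 / UDP_HTABLE_SIZE)

static bool port_in_use[65536];

static int pick_port(int low, int high)
{
        unsigned int remaining = (unsigned int)(high - low) + 1;
        unsigned int rnd = ((unsigned int)rand() << 16) ^ (unsigned int)rand();
        unsigned short first = (unsigned short)
                ((((unsigned long long)rnd * remaining) >> 32) + (unsigned int)low);
        unsigned short last, snum;
        bool bitmap[PORTS_PER_CHAIN];
        unsigned int step = (rnd | 1U) * UDP_HTABLE_SIZE;   /* odd multiple */

        for (last = first + UDP_HTABLE_SIZE; first != last; first++) {
                /* Record which ports of this hash chain are already taken,
                 * like udp_lib_lport_inuse() filling the bitmap. */
                memset(bitmap, 0, sizeof(bitmap));
                for (unsigned int p = first % UDP_HTABLE_SIZE; p < 65536;
                     p += UDP_HTABLE_SIZE)
                        if (port_in_use[p])
                                bitmap[p / UDP_HTABLE_SIZE] = true;

                snum = first;
                do {
                        if (low <= snum && snum <= high &&
                            !bitmap[snum / UDP_HTABLE_SIZE])
                                return snum;                /* free port */
                        snum = (unsigned short)(snum + step);
                } while (snum != first);
        }
        return -1;                                          /* none free */
}

int main(void)
{
        srand(1);
        port_in_use[33333] = true;              /* pretend one port is bound */
        printf("picked port %d\n", pick_port(32768, 61000));
        return 0;
}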
+ */ do { - snum = snum + rand; - } while (snum < low || snum > high); - if (snum == first) - goto fail; + if (low <= snum && snum <= high && + !test_bit(snum / UDP_HTABLE_SIZE, bitmap)) + goto found; + snum += rand; + } while (snum != first); + spin_unlock_bh(&hslot->lock); } + goto fail; } else { hslot = &udptable->hash[udp_hashfn(net, snum)]; spin_lock_bh(&hslot->lock); - if (udp_lib_lport_inuse(net, snum, hslot, sk, saddr_comp)) + if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, saddr_comp)) goto fail_unlock; } +found: inet_sk(sk)->num = snum; sk->sk_hash = snum; if (sk_unhashed(sk)) { diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index e92ad8455c63..f9afb452249c 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -4250,7 +4250,7 @@ static struct addrconf_sysctl_table .procname = "mc_forwarding", .data = &ipv6_devconf.mc_forwarding, .maxlen = sizeof(int), - .mode = 0644, + .mode = 0444, .proc_handler = proc_dointvec, }, #endif diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 4f433847d95f..36dff8807183 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -443,10 +443,10 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info, if (xfrm_decode_session_reverse(skb, &fl2, AF_INET6)) goto relookup_failed; - if (ip6_dst_lookup(sk, &dst2, &fl)) + if (ip6_dst_lookup(sk, &dst2, &fl2)) goto relookup_failed; - err = xfrm_lookup(net, &dst2, &fl, sk, XFRM_LOOKUP_ICMP); + err = xfrm_lookup(net, &dst2, &fl2, sk, XFRM_LOOKUP_ICMP); switch (err) { case 0: dst_release(dst); diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 936f48946e20..f171e8dbac91 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -255,6 +255,7 @@ int ip6_mc_input(struct sk_buff *skb) * IPv6 multicast router mode is now supported ;) */ if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding && + !(ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) && likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) { /* * Okay, we try to forward - split and duplicate @@ -316,7 +317,6 @@ int ip6_mc_input(struct sk_buff *skb) } if (skb2) { - skb2->dev = skb2->dst->dev; ip6_mr_input(skb2); } } diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 3c51b2d827f4..d19a84b79503 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -365,7 +365,9 @@ static int pim6_rcv(struct sk_buff *skb) pim = (struct pimreghdr *)skb_transport_header(skb); if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) || (pim->flags & PIM_NULL_REGISTER) || - (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 && + (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, + sizeof(*pim), IPPROTO_PIM, + csum_partial((void *)pim, sizeof(*pim), 0)) && csum_fold(skb_checksum(skb, 0, skb->len, 0)))) goto drop; @@ -392,7 +394,7 @@ static int pim6_rcv(struct sk_buff *skb) skb_pull(skb, (u8 *)encap - skb->data); skb_reset_network_header(skb); skb->dev = reg_dev; - skb->protocol = htons(ETH_P_IP); + skb->protocol = htons(ETH_P_IPV6); skb->ip_summed = 0; skb->pkt_type = PACKET_HOST; dst_release(skb->dst); @@ -481,6 +483,7 @@ static int mif6_delete(struct net *net, int vifi) { struct mif_device *v; struct net_device *dev; + struct inet6_dev *in6_dev; if (vifi < 0 || vifi >= net->ipv6.maxvif) return -EADDRNOTAVAIL; @@ -513,6 +516,10 @@ static int mif6_delete(struct net *net, int vifi) dev_set_allmulti(dev, -1); + in6_dev = __in6_dev_get(dev); + if (in6_dev) + in6_dev->cnf.mc_forwarding--; + if (v->flags & MIFF_REGISTER) unregister_netdevice(dev); @@ -622,6 +629,7 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int 
mrtsock) int vifi = vifc->mif6c_mifi; struct mif_device *v = &net->ipv6.vif6_table[vifi]; struct net_device *dev; + struct inet6_dev *in6_dev; int err; /* Is vif busy ? */ @@ -662,6 +670,10 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock) return -EINVAL; } + in6_dev = __in6_dev_get(dev); + if (in6_dev) + in6_dev->cnf.mc_forwarding++; + /* * Fill in the VIF structures */ @@ -838,8 +850,6 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi, skb->dst = dst_clone(pkt->dst); skb->ip_summed = CHECKSUM_UNNECESSARY; - - skb_pull(skb, sizeof(struct ipv6hdr)); } if (net->ipv6.mroute6_sk == NULL) { @@ -1222,8 +1232,10 @@ static int ip6mr_sk_init(struct sock *sk) rtnl_lock(); write_lock_bh(&mrt_lock); - if (likely(net->ipv6.mroute6_sk == NULL)) + if (likely(net->ipv6.mroute6_sk == NULL)) { net->ipv6.mroute6_sk = sk; + net->ipv6.devconf_all->mc_forwarding++; + } else err = -EADDRINUSE; write_unlock_bh(&mrt_lock); @@ -1242,6 +1254,7 @@ int ip6mr_sk_done(struct sock *sk) if (sk == net->ipv6.mroute6_sk) { write_lock_bh(&mrt_lock); net->ipv6.mroute6_sk = NULL; + net->ipv6.devconf_all->mc_forwarding--; write_unlock_bh(&mrt_lock); mroute_clean_tables(net); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index c4a59824ac2c..9c574235c905 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -794,7 +794,7 @@ void ip6_route_input(struct sk_buff *skb) .proto = iph->nexthdr, }; - if (rt6_need_strict(&iph->daddr)) + if (rt6_need_strict(&iph->daddr) && skb->dev->type != ARPHRD_PIMREG) flags |= RT6_LOOKUP_F_IFACE; skb->dst = fib6_rule_lookup(net, &fl, flags, ip6_pol_route_input); diff --git a/net/wimax/debugfs.c b/net/wimax/debugfs.c index 87cf4430079c..94d216a46407 100644 --- a/net/wimax/debugfs.c +++ b/net/wimax/debugfs.c @@ -28,17 +28,6 @@ #include "debug-levels.h" -/* Debug framework control of debug levels */ -struct d_level D_LEVEL[] = { - D_SUBMODULE_DEFINE(debugfs), - D_SUBMODULE_DEFINE(id_table), - D_SUBMODULE_DEFINE(op_msg), - D_SUBMODULE_DEFINE(op_reset), - D_SUBMODULE_DEFINE(op_rfkill), - D_SUBMODULE_DEFINE(stack), -}; -size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL); - #define __debugfs_register(prefix, name, parent) \ do { \ result = d_level_register_debugfs(prefix, name, parent); \ diff --git a/net/wimax/stack.c b/net/wimax/stack.c index d4da92f8981a..3869c0327882 100644 --- a/net/wimax/stack.c +++ b/net/wimax/stack.c @@ -516,6 +516,19 @@ void wimax_dev_rm(struct wimax_dev *wimax_dev) } EXPORT_SYMBOL_GPL(wimax_dev_rm); + +/* Debug framework control of debug levels */ +struct d_level D_LEVEL[] = { + D_SUBMODULE_DEFINE(debugfs), + D_SUBMODULE_DEFINE(id_table), + D_SUBMODULE_DEFINE(op_msg), + D_SUBMODULE_DEFINE(op_reset), + D_SUBMODULE_DEFINE(op_rfkill), + D_SUBMODULE_DEFINE(stack), +}; +size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL); + + struct genl_family wimax_gnl_family = { .id = GENL_ID_GENERATE, .name = "WiMAX", diff --git a/net/wireless/reg.c b/net/wireless/reg.c index bc494cef2102..85c9034c59b2 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -498,6 +498,7 @@ static struct ieee80211_regdomain *country_ie_2_rd( * calculate the number of reg rules we will need. 
We will need one * for each channel subband */ while (country_ie_len >= 3) { + int end_channel = 0; struct ieee80211_country_ie_triplet *triplet = (struct ieee80211_country_ie_triplet *) country_ie; int cur_sub_max_channel = 0, cur_channel = 0; @@ -509,9 +510,25 @@ static struct ieee80211_regdomain *country_ie_2_rd( continue; } + /* 2 GHz */ + if (triplet->chans.first_channel <= 14) + end_channel = triplet->chans.first_channel + + triplet->chans.num_channels; + else + /* + * 5 GHz -- For example in country IEs if the first + * channel given is 36 and the number of channels is 4 + * then the individual channel numbers defined for the + * 5 GHz PHY by these parameters are: 36, 40, 44, and 48 + * and not 36, 37, 38, 39. + * + * See: http://tinyurl.com/11d-clarification + */ + end_channel = triplet->chans.first_channel + + (4 * (triplet->chans.num_channels - 1)); + cur_channel = triplet->chans.first_channel; - cur_sub_max_channel = ieee80211_channel_to_frequency( - cur_channel + triplet->chans.num_channels); + cur_sub_max_channel = end_channel; /* Basic sanity check */ if (cur_sub_max_channel < cur_channel) @@ -590,15 +607,6 @@ static struct ieee80211_regdomain *country_ie_2_rd( end_channel = triplet->chans.first_channel + triplet->chans.num_channels; else - /* - * 5 GHz -- For example in country IEs if the first - * channel given is 36 and the number of channels is 4 - * then the individual channel numbers defined for the - * 5 GHz PHY by these parameters are: 36, 40, 44, and 48 - * and not 36, 37, 38, 39. - * - * See: http://tinyurl.com/11d-clarification - */ end_channel = triplet->chans.first_channel + (4 * (triplet->chans.num_channels - 1)); @@ -1276,7 +1284,7 @@ static void reg_country_ie_process_debug( if (intersected_rd) { printk(KERN_DEBUG "cfg80211: We intersect both of these " "and get:\n"); - print_regdomain_info(rd); + print_regdomain_info(intersected_rd); return; } printk(KERN_DEBUG "cfg80211: Intersection between both failed\n"); diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 75de40aaab0a..0177ef8f4c9e 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -347,6 +347,7 @@ static int conexant_mux_enum_put(struct snd_kcontrol *kcontrol, &spec->cur_mux[adc_idx]); } +#ifdef CONFIG_SND_JACK static int conexant_add_jack(struct hda_codec *codec, hda_nid_t nid, int type) { @@ -394,7 +395,6 @@ static void conexant_report_jack(struct hda_codec *codec, hda_nid_t nid) static int conexant_init_jacks(struct hda_codec *codec) { -#ifdef CONFIG_SND_JACK struct conexant_spec *spec = codec->spec; int i; @@ -422,10 +422,19 @@ static int conexant_init_jacks(struct hda_codec *codec) ++hv; } } -#endif return 0; } +#else +static inline void conexant_report_jack(struct hda_codec *codec, hda_nid_t nid) +{ +} + +static inline int conexant_init_jacks(struct hda_codec *codec) +{ + return 0; +} +#endif static int conexant_init(struct hda_codec *codec) { @@ -1566,6 +1575,7 @@ static struct snd_pci_quirk cxt5047_cfg_tbl[] = { SND_PCI_QUIRK(0x103c, 0x30a5, "HP DV5200T/DV8000T", CXT5047_LAPTOP_HP), SND_PCI_QUIRK(0x103c, 0x30b2, "HP DV2000T/DV3000T", CXT5047_LAPTOP), SND_PCI_QUIRK(0x103c, 0x30b5, "HP DV2000Z", CXT5047_LAPTOP), + SND_PCI_QUIRK(0x103c, 0x30cf, "HP DV6700", CXT5047_LAPTOP), SND_PCI_QUIRK(0x1179, 0xff31, "Toshiba P100", CXT5047_LAPTOP_EAPD), {} }; diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 5d249a547fbf..7884a4e07061 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ 
-7018,6 +7018,7 @@ static int patch_alc882(struct hda_codec *codec) case 0x106b00a4: /* MacbookPro4,1 */ case 0x106b2c00: /* Macbook Pro rev3 */ case 0x106b3600: /* Macbook 3.1 */ + case 0x106b3800: /* MacbookPro4,1 - latter revision */ board_config = ALC885_MBP3; break; default: diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 3dd4eee70b7c..b787b3cc096f 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -2539,6 +2539,8 @@ static int stac92xx_build_pcms(struct hda_codec *codec) info->name = "STAC92xx Analog"; info->stream[SNDRV_PCM_STREAM_PLAYBACK] = stac92xx_pcm_analog_playback; + info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = + spec->multiout.dac_nids[0]; info->stream[SNDRV_PCM_STREAM_CAPTURE] = stac92xx_pcm_analog_capture; info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->adc_nids[0]; info->stream[SNDRV_PCM_STREAM_CAPTURE].substreams = spec->num_adcs; diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c index 6c21b50c9375..77620ab98756 100644 --- a/sound/soc/codecs/wm8753.c +++ b/sound/soc/codecs/wm8753.c @@ -1451,7 +1451,14 @@ static const struct snd_soc_dai wm8753_all_dai[] = { }, }; -struct snd_soc_dai wm8753_dai[2]; +struct snd_soc_dai wm8753_dai[] = { + { + .name = "WM8753 DAI 0", + }, + { + .name = "WM8753 DAI 1", + }, +}; EXPORT_SYMBOL_GPL(wm8753_dai); static void wm8753_set_dai_mode(struct snd_soc_codec *codec, unsigned int mode) diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c index ec5e18a78758..05dd5abcddf4 100644 --- a/sound/soc/omap/omap-mcbsp.c +++ b/sound/soc/omap/omap-mcbsp.c @@ -302,6 +302,10 @@ static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai, regs->spcr1 |= RINTM(3); regs->rcr2 |= RFIG; regs->xcr2 |= XFIG; + if (cpu_is_omap2430() || cpu_is_omap34xx()) { + regs->xccr = DXENDLY(1) | XDMAEN; + regs->rccr = RFULL_CYCLE | RDMAEN; + } switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: |
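Looking back at the net/wireless/reg.c hunk earlier in this series, the end-channel rule it introduces is easy to check in isolation: on 2 GHz the triplet counts consecutive channel numbers, while on 5 GHz channels are spaced four apart, so first_channel 36 with num_channels 4 covers 36, 40, 44 and 48. A standalone model of just that calculation, mirroring the formulas in the hunk:

/*
 * Worked example of the country-IE end-channel rule added in reg.c above.
 * Standalone model, not kernel code.
 */
#include <stdio.h>

static int triplet_end_channel(int first_channel, int num_channels)
{
        if (first_channel <= 14)                /* 2 GHz: consecutive channels */
                return first_channel + num_channels;
        /* 5 GHz: channel numbers step by 4 */
        return first_channel + 4 * (num_channels - 1);
}

int main(void)
{
        printf("2 GHz: first 1,  num 11 -> end %d\n", triplet_end_channel(1, 11));
        printf("5 GHz: first 36, num 4  -> end %d\n", triplet_end_channel(36, 4));
        return 0;
}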